This article collects typical usage examples of the Golang function FindNodes from github.com/docker/swarmkit/manager/state/store. If you have been wondering what FindNodes does, how to call it, or what real-world usage looks like, the hand-picked code examples here should help.

Below, 15 code examples of the FindNodes function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
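Before diving in, here is the pattern every example below shares: FindNodes never runs on its own, but inside a transaction obtained from the store, together with a selector such as store.All or store.ByRole. A minimal sketch of that pattern, assuming an initialized *store.MemoryStore (the function name listManagers is ours, purely for illustration):

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// listManagers returns every node currently stored with the manager role.
func listManagers(s *store.MemoryStore) ([]*api.Node, error) {
	var (
		nodes []*api.Node
		err   error
	)
	// FindNodes must be called inside a transaction; View provides a read-only one.
	s.View(func(readTx store.ReadTx) {
		nodes, err = store.FindNodes(readTx, store.ByRole(api.NodeRoleManager))
	})
	return nodes, err
}
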
Example 1: TestHeartbeatTimeout
func TestHeartbeatTimeout(t *testing.T) {
	cfg := DefaultConfig()
	cfg.HeartbeatPeriod = 100 * time.Millisecond
	cfg.HeartbeatEpsilon = 0
	gd, err := startDispatcher(cfg)
	assert.NoError(t, err)
	defer gd.Close()

	var expectedSessionID string
	{
		stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{})
		assert.NoError(t, err)
		resp, err := stream.Recv()
		assert.NoError(t, err)
		assert.NotEmpty(t, resp.SessionID)
		expectedSessionID = resp.SessionID
	}

	time.Sleep(500 * time.Millisecond)

	gd.Store.View(func(readTx store.ReadTx) {
		storeNodes, err := store.FindNodes(readTx, store.All)
		assert.NoError(t, err)
		assert.NotEmpty(t, storeNodes)
		assert.Equal(t, api.NodeStatus_DOWN, storeNodes[0].Status.State)
	})

	// check that node is deregistered
	resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID})
	assert.Nil(t, resp)
	assert.Error(t, err)
	assert.Equal(t, grpc.ErrorDesc(err), ErrNodeNotRegistered.Error())
}

Example 2: Run
// Run is the ConstraintEnforcer's main loop.
func (ce *ConstraintEnforcer) Run() {
	defer close(ce.doneChan)

	watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), state.EventUpdateNode{})
	defer cancelWatch()

	var (
		nodes []*api.Node
		err   error
	)
	ce.store.View(func(readTx store.ReadTx) {
		nodes, err = store.FindNodes(readTx, store.All)
	})
	if err != nil {
		log.L.WithError(err).Error("failed to check nodes for noncompliant tasks")
	} else {
		for _, node := range nodes {
			ce.shutdownNoncompliantTasks(node)
		}
	}

	for {
		select {
		case event := <-watcher:
			node := event.(state.EventUpdateNode).Node
			ce.shutdownNoncompliantTasks(node)
		case <-ce.stopChan:
			return
		}
	}
}

Example 3: testRaftRestartCluster
func testRaftRestartCluster(t *testing.T, stagger bool) {
	nodes, clockSource := raftutils.NewRaftCluster(t, tc)
	defer raftutils.TeardownCluster(t, nodes)

	// Propose a value
	values := make([]*api.Node, 2)
	var err error
	values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
	assert.NoError(t, err, "failed to propose value")

	// Stop all nodes
	for _, node := range nodes {
		node.Server.Stop()
		node.Shutdown()
	}

	raftutils.AdvanceTicks(clockSource, 5)

	// Restart all nodes
	i := 0
	for k, node := range nodes {
		if stagger && i != 0 {
			raftutils.AdvanceTicks(clockSource, 1)
		}
		nodes[k] = raftutils.RestartNode(t, clockSource, node, false)
		i++
	}
	raftutils.WaitForCluster(t, clockSource, nodes)

	// Propose another value
	values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
	assert.NoError(t, err, "failed to propose value")

	for _, node := range nodes {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				if len(allNodes) != 2 {
					err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
					return
				}
				for i, nodeID := range []string{"id1", "id2"} {
					n := store.GetNode(tx, nodeID)
					if !reflect.DeepEqual(n, values[i]) {
						err = fmt.Errorf("node %s did not match expected value", nodeID)
						return
					}
				}
			})
			return err
		}))
	}
}

Example 4: Run
// Run is roleManager's main loop.
func (rm *roleManager) Run() {
	defer close(rm.doneChan)

	var (
		nodes    []*api.Node
		ticker   *time.Ticker
		tickerCh <-chan time.Time
	)

	watcher, cancelWatch, err := store.ViewAndWatch(rm.store,
		func(readTx store.ReadTx) error {
			var err error
			nodes, err = store.FindNodes(readTx, store.All)
			return err
		},
		state.EventUpdateNode{})
	defer cancelWatch()

	if err != nil {
		log.L.WithError(err).Error("failed to check nodes for role changes")
	} else {
		for _, node := range nodes {
			rm.pending[node.ID] = node
			rm.reconcileRole(node)
		}
		if len(rm.pending) != 0 {
			ticker = time.NewTicker(roleReconcileInterval)
			tickerCh = ticker.C
		}
	}

	for {
		select {
		case event := <-watcher:
			node := event.(state.EventUpdateNode).Node
			rm.pending[node.ID] = node
			rm.reconcileRole(node)
			if len(rm.pending) != 0 && ticker == nil {
				ticker = time.NewTicker(roleReconcileInterval)
				tickerCh = ticker.C
			}
		case <-tickerCh:
			for _, node := range rm.pending {
				rm.reconcileRole(node)
			}
			if len(rm.pending) == 0 {
				ticker.Stop()
				ticker = nil
				tickerCh = nil
			}
		case <-rm.ctx.Done():
			if ticker != nil {
				ticker.Stop()
			}
			return
		}
	}
}

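A note on this example: unlike Example 2, which registers its watcher and then reads the node list in two separate steps, store.ViewAndWatch performs the initial FindNodes read and the watcher registration together, so no node update can fall into the gap between the initial snapshot and the event stream.
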
Example 5: UpdateNode
// UpdateNode updates a Node referenced by NodeID with the given NodeSpec.
// - Returns `NotFound` if the Node is not found.
// - Returns `InvalidArgument` if the NodeSpec is malformed.
// - Returns an error if the update fails.
func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) {
	if request.NodeID == "" || request.NodeVersion == nil {
		return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
	}
	if err := validateNodeSpec(request.Spec); err != nil {
		return nil, err
	}

	var (
		node   *api.Node
		demote bool
	)
	err := s.store.Update(func(tx store.Tx) error {
		node = store.GetNode(tx, request.NodeID)
		if node == nil {
			return nil
		}

		// Demotion sanity checks.
		if node.Spec.Role == api.NodeRoleManager && request.Spec.Role == api.NodeRoleWorker {
			demote = true
			managers, err := store.FindNodes(tx, store.ByRole(api.NodeRoleManager))
			if err != nil {
				return grpc.Errorf(codes.Internal, "internal store error: %v", err)
			}
			if len(managers) == 1 && managers[0].ID == node.ID {
				return grpc.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm")
			}
		}

		node.Meta.Version = *request.NodeVersion
		node.Spec = *request.Spec.Copy()
		return store.UpdateNode(tx, node)
	})
	if err != nil {
		return nil, err
	}
	if node == nil {
		return nil, grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
	}

	if demote && s.raft != nil {
		memberlist := s.raft.GetMemberlist()
		for raftID, member := range memberlist {
			if member.NodeID == request.NodeID {
				if err := s.raft.RemoveMember(ctx, raftID); err != nil {
					return nil, err
				}
				break
			}
		}
	}

	return &api.UpdateNodeResponse{
		Node: node,
	}, nil
}

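Example 6 below is a later revision of this same RPC: the role fields become Spec.DesiredRole, the NotFound check moves inside the transaction, and the raft membership and quorum checks are performed up front, instead of removing the raft member only after the store update succeeds.
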
Example 6: UpdateNode
// UpdateNode updates a Node referenced by NodeID with the given NodeSpec.
// - Returns `NotFound` if the Node is not found.
// - Returns `InvalidArgument` if the NodeSpec is malformed.
// - Returns an error if the update fails.
func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) {
	if request.NodeID == "" || request.NodeVersion == nil {
		return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
	}
	if err := validateNodeSpec(request.Spec); err != nil {
		return nil, err
	}

	var (
		node   *api.Node
		member *membership.Member
	)
	err := s.store.Update(func(tx store.Tx) error {
		node = store.GetNode(tx, request.NodeID)
		if node == nil {
			return grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
		}

		// Demotion sanity checks.
		if node.Spec.DesiredRole == api.NodeRoleManager && request.Spec.DesiredRole == api.NodeRoleWorker {
			// Check for manager entries in Store.
			managers, err := store.FindNodes(tx, store.ByRole(api.NodeRoleManager))
			if err != nil {
				return grpc.Errorf(codes.Internal, "internal store error: %v", err)
			}
			if len(managers) == 1 && managers[0].ID == node.ID {
				return grpc.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm")
			}

			// Check for node in memberlist
			if member = s.raft.GetMemberByNodeID(request.NodeID); member == nil {
				return grpc.Errorf(codes.NotFound, "can't find manager in raft memberlist")
			}

			// Quorum safeguard
			if !s.raft.CanRemoveMember(member.RaftID) {
				return grpc.Errorf(codes.FailedPrecondition, "can't remove member from the raft: this would result in a loss of quorum")
			}
		}

		node.Meta.Version = *request.NodeVersion
		node.Spec = *request.Spec.Copy()
		return store.UpdateNode(tx, node)
	})
	if err != nil {
		return nil, err
	}

	return &api.UpdateNodeResponse{
		Node: node,
	}, nil
}

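Examples 5 and 6 also show that FindNodes works with a write transaction (store.Tx), not just a ReadTx, since update transactions can read as well. A minimal sketch of just the last-manager guard, under the assumption that s is an initialized *store.MemoryStore (guardDemotion and errLastManager are illustrative names, not swarmkit APIs):

import (
	"errors"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// errLastManager is a sentinel error invented for this sketch.
var errLastManager = errors.New("attempting to demote the last manager of the swarm")

// guardDemotion fails if nodeID is the only manager left in the store.
func guardDemotion(s *store.MemoryStore, nodeID string) error {
	return s.Update(func(tx store.Tx) error {
		// FindNodes also accepts a write transaction.
		managers, err := store.FindNodes(tx, store.ByRole(api.NodeRoleManager))
		if err != nil {
			return err
		}
		if len(managers) == 1 && managers[0].ID == nodeID {
			return errLastManager
		}
		return nil
	})
}
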
Example 7: markNodesUnknown
func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
	var nodes []*api.Node
	var err error
	d.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return fmt.Errorf("failed to get list of nodes: %v", err)
	}
	_, err = d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			err := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, n.ID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					return nil
				}
				node.Status = api.NodeStatus{
					State:   api.NodeStatus_UNKNOWN,
					Message: `Node moved to "unknown" state due to leadership change in cluster`,
				}
				nodeID := node.ID

				expireFunc := func() {
					log := log.WithField("node", nodeID)
					nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: `heartbeat failure for node in "unknown" state`}
					log.Debugf("heartbeat expiration for unknown node")
					if err := d.nodeRemove(nodeID, nodeStatus); err != nil {
						log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return fmt.Errorf(`adding node in "unknown" state to node store failed: %v`, err)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return fmt.Errorf("update failed %v", err)
				}
				return nil
			})
			if err != nil {
				log.WithField("node", n.ID).WithError(err).Errorf(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return err
}

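Note that Example 15 at the end of this page is a later revision of this same function: error wrapping moves from fmt.Errorf to errors.Wrap, and down nodes are no longer skipped silently but registered in a downNodes list whose expiry moves their tasks to the ORPHANED state.
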
Example 8: TestHeartbeat
func TestHeartbeat(t *testing.T) {
	cfg := DefaultConfig()
	cfg.HeartbeatPeriod = 500 * time.Millisecond
	cfg.HeartbeatEpsilon = 0
	gd, err := startDispatcher(cfg)
	assert.NoError(t, err)
	defer gd.Close()

	var expectedSessionID string
	{
		stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{})
		assert.NoError(t, err)
		defer stream.CloseSend()
		resp, err := stream.Recv()
		assert.NoError(t, err)
		assert.NotEmpty(t, resp.SessionID)
		expectedSessionID = resp.SessionID
	}

	time.Sleep(250 * time.Millisecond)

	{
		// heartbeat without correct SessionID should fail
		resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{})
		assert.Nil(t, resp)
		assert.Error(t, err)
		assert.Equal(t, grpc.Code(err), codes.InvalidArgument)
	}

	resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID})
	assert.NoError(t, err)
	assert.NotZero(t, resp.Period)
	time.Sleep(300 * time.Millisecond)

	gd.Store.View(func(readTx store.ReadTx) {
		storeNodes, err := store.FindNodes(readTx, store.All)
		assert.NoError(t, err)
		assert.NotEmpty(t, storeNodes)
		found := false
		for _, node := range storeNodes {
			if node.ID == gd.SecurityConfigs[0].ClientTLSCreds.NodeID() {
				found = true
				assert.Equal(t, api.NodeStatus_READY, node.Status.State)
			}
		}
		assert.True(t, found)
	})
}

Example 9: buildNodeSet
func (s *Scheduler) buildNodeSet(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
	nodes, err := store.FindNodes(tx, store.All)
	if err != nil {
		return err
	}

	s.nodeSet.alloc(len(nodes))

	for _, n := range nodes {
		var resources api.Resources
		if n.Description != nil && n.Description.Resources != nil {
			resources = *n.Description.Resources
		}
		s.nodeSet.addOrUpdateNode(newNodeInfo(n, tasksByNode[n.ID], resources))
	}

	return nil
}

Example 10: TestClusterReelection
func TestClusterReelection(t *testing.T) {
	if !*integrationTests {
		t.Skip("integration test")
	}

	mCount, aCount := 5, 15

	c := createManagersCluster(t, mCount, aCount)
	require.NoError(t, testutils.PollFunc(nil, c.pollRegister))

	require.NoError(t, c.destroyLeader())
	// take down some agents in the meantime
	require.NoError(t, c.destroyAgents(5))
	// ensure that the cluster converges to the expected number of agents; the big timeout is needed because of heartbeat times
	require.NoError(t, testutils.PollFuncWithTimeout(nil, c.pollRegister, 30*time.Second))

	leader, err := c.leader()
	assert.NoError(t, err)

	// check nodes in store
	var nodes []*api.Node
	leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
		ns, err := store.FindNodes(tx, store.All)
		assert.NoError(t, err)
		for _, n := range ns {
			if n.Spec.Role == api.NodeRoleWorker {
				nodes = append(nodes, n)
			}
		}
	})
	assert.NoError(t, err)
	assert.Len(t, nodes, aCount, "there should be all nodes in store")

	var downAgentsCount int
	for _, node := range nodes {
		if node.Status.State == api.NodeStatus_DOWN {
			downAgentsCount++
			continue
		}
		assert.Equal(t, api.NodeStatus_READY, node.Status.State, "there should be only down and ready nodes at this point")
	}
	assert.Equal(t, 5, downAgentsCount, "unexpected number of down agents")
}

Example 11: buildNodeHeap
func (s *Scheduler) buildNodeHeap(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
	nodes, err := store.FindNodes(tx, store.All)
	if err != nil {
		return err
	}

	s.nodeHeap.alloc(len(nodes))

	i := 0
	for _, n := range nodes {
		var resources api.Resources
		if n.Description != nil && n.Description.Resources != nil {
			resources = *n.Description.Resources
		}
		s.nodeHeap.heap = append(s.nodeHeap.heap, newNodeInfo(n, tasksByNode[n.ID], resources))
		s.nodeHeap.index[n.ID] = i
		i++
	}

	heap.Init(&s.nodeHeap)

	return nil
}

Example 12: ListNodes
// ListNodes returns a list of all nodes.
func (s *Server) ListNodes(ctx context.Context, request *api.ListNodesRequest) (*api.ListNodesResponse, error) {
	var (
		nodes []*api.Node
		err   error
	)
	s.store.View(func(tx store.ReadTx) {
		switch {
		case request.Filters != nil && len(request.Filters.Names) > 0:
			nodes, err = store.FindNodes(tx, buildFilters(store.ByName, request.Filters.Names))
		case request.Filters != nil && len(request.Filters.NamePrefixes) > 0:
			nodes, err = store.FindNodes(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))
		case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
			nodes, err = store.FindNodes(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
		case request.Filters != nil && len(request.Filters.Roles) > 0:
			filters := make([]store.By, 0, len(request.Filters.Roles))
			for _, v := range request.Filters.Roles {
				filters = append(filters, store.ByRole(v))
			}
			nodes, err = store.FindNodes(tx, store.Or(filters...))
		case request.Filters != nil && len(request.Filters.Memberships) > 0:
			filters := make([]store.By, 0, len(request.Filters.Memberships))
			for _, v := range request.Filters.Memberships {
				filters = append(filters, store.ByMembership(v))
			}
			nodes, err = store.FindNodes(tx, store.Or(filters...))
		default:
			nodes, err = store.FindNodes(tx, store.All)
		}
	})
	if err != nil {
		return nil, err
	}

	if request.Filters != nil {
		nodes = filterNodes(nodes,
			func(e *api.Node) bool {
				if len(request.Filters.Names) == 0 {
					return true
				}
				if e.Description == nil {
					return false
				}
				return filterContains(e.Description.Hostname, request.Filters.Names)
			},
			func(e *api.Node) bool {
				if len(request.Filters.NamePrefixes) == 0 {
					return true
				}
				if e.Description == nil {
					return false
				}
				return filterContainsPrefix(e.Description.Hostname, request.Filters.NamePrefixes)
			},
			func(e *api.Node) bool {
				return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
			},
			func(e *api.Node) bool {
				if len(request.Filters.Labels) == 0 {
					return true
				}
				if e.Description == nil {
					return false
				}
				return filterMatchLabels(e.Description.Engine.Labels, request.Filters.Labels)
			},
			func(e *api.Node) bool {
				if len(request.Filters.Roles) == 0 {
					return true
				}
				for _, c := range request.Filters.Roles {
					if c == e.Spec.Role {
						return true
					}
				}
				return false
			},
			func(e *api.Node) bool {
				if len(request.Filters.Memberships) == 0 {
					return true
				}
				for _, c := range request.Filters.Memberships {
					if c == e.Spec.Membership {
						return true
					}
				}
				return false
			},
		)
	}

	// Add in manager information on nodes that are managers
	if s.raft != nil {
		memberlist := s.raft.GetMemberlist()

		for _, node := range nodes {
			for _, member := range memberlist {
				if member.NodeID == node.ID {
					node.ManagerStatus = &api.ManagerStatus{
						RaftID: member.RaftID,
//......... the rest of the code is omitted here .........

Example 13: doNetworkInit
//......... part of the code is omitted here .........
		networks, err = store.FindNetworks(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "error listing all networks in store while trying to allocate during init")
	}

	var allocatedNetworks []*api.Network
	for _, n := range networks {
		if na.IsAllocated(n) {
			continue
		}
		if err := a.allocateNetwork(ctx, n); err != nil {
			log.G(ctx).WithError(err).Errorf("failed allocating network %s during init", n.ID)
			continue
		}
		allocatedNetworks = append(allocatedNetworks, n)
	}

	if _, err := a.store.Batch(func(batch *store.Batch) error {
		for _, n := range allocatedNetworks {
			if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil {
				log.G(ctx).WithError(err).Errorf("failed committing allocation of network %s during init", n.ID)
			}
		}
		return nil
	}); err != nil {
		log.G(ctx).WithError(err).Error("failed committing allocation of networks during init")
	}

	// Allocate nodes in the store so far before we process watched events.
	var nodes []*api.Node
	a.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "error listing all nodes in store while trying to allocate during init")
	}

	var allocatedNodes []*api.Node
	for _, node := range nodes {
		if na.IsNodeAllocated(node) {
			continue
		}

		if node.Attachment == nil {
			node.Attachment = &api.NetworkAttachment{}
		}

		node.Attachment.Network = nc.ingressNetwork.Copy()
		if err := a.allocateNode(ctx, node); err != nil {
			log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s during init", node.ID)
			continue
		}

		allocatedNodes = append(allocatedNodes, node)
	}

	if _, err := a.store.Batch(func(batch *store.Batch) error {
		for _, node := range allocatedNodes {
			if err := a.commitAllocatedNode(ctx, batch, node); err != nil {
				log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s during init", node.ID)
			}
		}
		return nil
	}); err != nil {

Example 14: TestRaftForceNewCluster
func TestRaftForceNewCluster(t *testing.T) {
	t.Parallel()

	nodes, clockSource := raftutils.NewRaftCluster(t, tc)

	// Propose a value
	values := make([]*api.Node, 2)
	var err error
	values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
	assert.NoError(t, err, "failed to propose value")

	// The memberlist should contain 3 members on each node
	for i := 1; i <= 3; i++ {
		assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
	}

	// Stop all nodes
	for _, node := range nodes {
		node.Server.Stop()
		node.Shutdown()
	}

	raftutils.AdvanceTicks(clockSource, 5)

	toClean := map[uint64]*raftutils.TestNode{
		2: nodes[2],
		3: nodes[3],
	}
	raftutils.TeardownCluster(t, toClean)
	delete(nodes, 2)
	delete(nodes, 3)

	// Only restart the first node with force-new-cluster option
	nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], true)
	raftutils.WaitForCluster(t, clockSource, nodes)

	// The memberlist should contain only one node (self)
	assert.Equal(t, len(nodes[1].GetMemberlist()), 1)

	// Add 2 more members
	nodes[2] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
	raftutils.WaitForCluster(t, clockSource, nodes)

	nodes[3] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
	raftutils.WaitForCluster(t, clockSource, nodes)

	newCluster := map[uint64]*raftutils.TestNode{
		1: nodes[1],
		2: nodes[2],
		3: nodes[3],
	}
	defer raftutils.TeardownCluster(t, newCluster)

	// The memberlist should contain 3 members on each node
	for i := 1; i <= 3; i++ {
		assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
	}

	// Propose another value
	values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
	assert.NoError(t, err, "failed to propose value")

	for _, node := range nodes {
		assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
			var err error
			node.MemoryStore().View(func(tx store.ReadTx) {
				var allNodes []*api.Node
				allNodes, err = store.FindNodes(tx, store.All)
				if err != nil {
					return
				}
				if len(allNodes) != 2 {
					err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
					return
				}
				for i, nodeID := range []string{"id1", "id2"} {
					n := store.GetNode(tx, nodeID)
					if !reflect.DeepEqual(n, values[i]) {
						err = fmt.Errorf("node %s did not match expected value", nodeID)
						return
					}
				}
			})
			return err
		}))
	}
}

Example 15: markNodesUnknown
func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
	var nodes []*api.Node
	var err error
	d.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get list of nodes")
	}
	_, err = d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			err := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, n.ID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					nodeCopy := node
					expireFunc := func() {
						if err := d.moveTasksToOrphaned(nodeCopy.ID); err != nil {
							log.WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
						}
						d.downNodes.Delete(nodeCopy.ID)
					}
					d.downNodes.Add(nodeCopy, expireFunc)
					return nil
				}
				node.Status.State = api.NodeStatus_UNKNOWN
				node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster`
				nodeID := node.ID

				expireFunc := func() {
					log := log.WithField("node", nodeID)
					log.Debugf("heartbeat expiration for unknown node")
					if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
						log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return errors.Wrap(err, `adding node in "unknown" state to node store failed`)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return errors.Wrap(err, "update failed")
				}
				return nil
			})
			if err != nil {
				log.WithField("node", n.ID).WithError(err).Errorf(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return err
}

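To close, the selectors used across these examples (store.All, store.ByName, store.ByIDPrefix, store.ByRole, store.ByMembership) compose with store.Or, as Example 12 shows. A minimal standalone sketch of that composition, assuming an initialized *store.MemoryStore and the node membership constants from the swarmkit api package:

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// pendingOrAcceptedNodes combines two membership selectors with store.Or,
// mirroring the filter handling in the ListNodes example above.
func pendingOrAcceptedNodes(s *store.MemoryStore) ([]*api.Node, error) {
	var (
		nodes []*api.Node
		err   error
	)
	s.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.Or(
			store.ByMembership(api.NodeMembershipPending),
			store.ByMembership(api.NodeMembershipAccepted),
		))
	})
	return nodes, err
}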