This article collects typical usage examples of the Debugf function from the Golang package github.com/docker/swarmkit/log. If you are wrestling with questions such as: what exactly does Debugf do, how is it called, and what does it look like in real code? Then the hand-picked code examples here may help.
A total of 9 code examples of the Debugf function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
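Before the examples, here is a minimal sketch of the pattern they all share: swarmkit's log package carries a logrus entry in the context, so callers fetch it with log.G(ctx), attach structured fields, and call Debugf. This program is illustrative only; the field values are made up, and depending on the swarmkit vintage the logrus import path may be github.com/Sirupsen/logrus rather than github.com/sirupsen/logrus.

package main

import (
	"context"

	"github.com/docker/swarmkit/log"
	"github.com/sirupsen/logrus"
)

func main() {
	ctx := context.Background()

	// log.G returns the logrus entry carried by the context (or the package default).
	logger := log.G(ctx).WithFields(logrus.Fields{
		"method":  "main",
		"node.id": "example-node", // made-up value, for illustration only
	})

	// Debugf behaves like logrus' Debugf: a format string plus arguments.
	logger.Debugf("processed %d tasks", 3)
}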
Example 1: logSubscriptions
func (s *session) logSubscriptions(ctx context.Context) error {
	log := log.G(ctx).WithFields(logrus.Fields{"method": "(*session).logSubscriptions"})
	log.Debugf("")

	client := api.NewLogBrokerClient(s.conn)
	subscriptions, err := client.ListenSubscriptions(ctx, &api.ListenSubscriptionsRequest{})
	if err != nil {
		return err
	}
	defer subscriptions.CloseSend()

	for {
		resp, err := subscriptions.Recv()
		if err != nil {
			return err
		}

		select {
		case s.subscriptions <- resp:
		case <-s.closed:
			return errSessionClosed
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
Example 2: markNodesUnknown
func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
	var nodes []*api.Node
	var err error
	d.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return fmt.Errorf("failed to get list of nodes: %v", err)
	}
	_, err = d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			err := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, n.ID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					return nil
				}
				node.Status = api.NodeStatus{
					State:   api.NodeStatus_UNKNOWN,
					Message: `Node moved to "unknown" state due to leadership change in cluster`,
				}
				nodeID := node.ID

				expireFunc := func() {
					log := log.WithField("node", nodeID)
					nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: `heartbeat failure for node in "unknown" state`}
					log.Debugf("heartbeat expiration for unknown node")
					if err := d.nodeRemove(nodeID, nodeStatus); err != nil {
						log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return fmt.Errorf(`adding node in "unknown" state to node store failed: %v`, err)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return fmt.Errorf("update failed %v", err)
				}
				return nil
			})
			if err != nil {
				log.WithField("node", n.ID).WithError(err).Errorf(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return err
}
Example 3: Join
// Join asks a member of the raft to propose a configuration change and add
// us as a member, thus beginning the log replication process. This method
// is called from an aspiring member to an existing member.
func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}

	fields := logrus.Fields{
		"node.id": nodeInfo.NodeID,
		"method":  "(*Node).Join",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)

	raftID, err := identity.ParseNodeID(nodeInfo.NodeID)
	if err != nil {
		return nil, err
	}

	// can't stop the raft node while an async RPC is in progress
	n.stopMu.RLock()
	defer n.stopMu.RUnlock()

	if n.Node == nil {
		log.WithError(ErrStopped).Errorf(ErrStopped.Error())
		return nil, ErrStopped
	}

	// We submit a configuration change only if the node was not registered yet
	if n.cluster.GetMember(raftID) == nil {
		err = n.addMember(ctx, req.Addr, raftID)
		if err != nil {
			log.WithError(err).Errorf("failed to add member")
			return nil, err
		}
	}

	var nodes []*api.RaftMember
	for _, node := range n.cluster.Members() {
		nodes = append(nodes, &api.RaftMember{
			RaftID: node.RaftID,
			Addr:   node.Addr,
		})
	}
	log.Debugf("node joined")

	return &api.JoinResponse{Members: nodes}, nil
}
Example 4: logSubscriptions
func (s *session) logSubscriptions(ctx context.Context) error {
	log := log.G(ctx).WithFields(logrus.Fields{"method": "(*session).logSubscriptions"})
	log.Debugf("")

	client := api.NewLogBrokerClient(s.conn)
	subscriptions, err := client.ListenSubscriptions(ctx, &api.ListenSubscriptionsRequest{})
	if err != nil {
		return err
	}
	defer subscriptions.CloseSend()

	for {
		resp, err := subscriptions.Recv()
		if grpc.Code(err) == codes.Unimplemented {
			log.Warning("manager does not support log subscriptions")
			// Don't return, because returning would bounce the session
			select {
			case <-s.closed:
				return errSessionClosed
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		if err != nil {
			return err
		}

		select {
		case s.subscriptions <- resp:
		case <-s.closed:
			return errSessionClosed
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
Example 5: Join
// Join asks a member of the raft to propose a configuration change and add
// us as a member, thus beginning the log replication process. This method
// is called from an aspiring member to an existing member.
func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinResponse, error) {
	nodeInfo, err := ca.RemoteNode(ctx)
	if err != nil {
		return nil, err
	}

	fields := logrus.Fields{
		"node.id": nodeInfo.NodeID,
		"method":  "(*Node).Join",
		"raft_id": fmt.Sprintf("%x", n.Config.ID),
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(ctx).WithFields(fields)
	log.Debug("")

	// can't stop the raft node while an async RPC is in progress
	n.stopMu.RLock()
	defer n.stopMu.RUnlock()

	n.membershipLock.Lock()
	defer n.membershipLock.Unlock()

	if !n.IsMember() {
		return nil, ErrNoRaftMember
	}

	if !n.isLeader() {
		return nil, ErrLostLeadership
	}

	// Find a unique ID for the joining member.
	var raftID uint64
	for {
		raftID = uint64(rand.Int63()) + 1
		if n.cluster.GetMember(raftID) == nil && !n.cluster.IsIDRemoved(raftID) {
			break
		}
	}

	remoteAddr := req.Addr

	// If the joining node sent an address like 0.0.0.0:4242, automatically
	// determine its actual address based on the GRPC connection. This
	// avoids the need for a prospective member to know its own address.
	// (A standalone sketch of this address rewrite follows this example.)
	requestHost, requestPort, err := net.SplitHostPort(remoteAddr)
	if err != nil {
		return nil, fmt.Errorf("invalid address %s in raft join request", remoteAddr)
	}

	requestIP := net.ParseIP(requestHost)
	if requestIP != nil && requestIP.IsUnspecified() {
		remoteHost, _, err := net.SplitHostPort(nodeInfo.RemoteAddr)
		if err != nil {
			return nil, err
		}
		remoteAddr = net.JoinHostPort(remoteHost, requestPort)
	}

	// We do not bother submitting a configuration change for the
	// new member if we can't contact it back using its address
	if err := n.checkHealth(ctx, remoteAddr, 5*time.Second); err != nil {
		return nil, err
	}

	err = n.addMember(ctx, remoteAddr, raftID, nodeInfo.NodeID)
	if err != nil {
		log.WithError(err).Errorf("failed to add member %x", raftID)
		return nil, err
	}

	var nodes []*api.RaftMember
	for _, node := range n.cluster.Members() {
		nodes = append(nodes, &api.RaftMember{
			RaftID: node.RaftID,
			NodeID: node.NodeID,
			Addr:   node.Addr,
		})
	}
	log.Debugf("node joined")

	return &api.JoinResponse{Members: nodes, RaftID: raftID}, nil
}
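The address rewrite called out in the comment in the example above (the 0.0.0.0:4242 case) is easy to exercise on its own. The helper below is a hypothetical standalone sketch, not part of swarmkit; it only reuses the standard-library net calls that Join itself makes.

package main

import (
	"fmt"
	"net"
)

// resolveJoinAddr mirrors the address handling in (*Node).Join: if the
// advertised address is unspecified (e.g. "0.0.0.0:4242"), substitute the
// host observed on the gRPC connection while keeping the advertised port.
func resolveJoinAddr(advertised, observedRemote string) (string, error) {
	host, port, err := net.SplitHostPort(advertised)
	if err != nil {
		return "", fmt.Errorf("invalid address %s in raft join request", advertised)
	}
	if ip := net.ParseIP(host); ip != nil && ip.IsUnspecified() {
		remoteHost, _, err := net.SplitHostPort(observedRemote)
		if err != nil {
			return "", err
		}
		return net.JoinHostPort(remoteHost, port), nil
	}
	return advertised, nil
}

func main() {
	addr, _ := resolveJoinAddr("0.0.0.0:4242", "10.0.1.7:53412")
	fmt.Println(addr) // prints "10.0.1.7:4242"
}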
Example 6: Assignments
// Assignments is a stream of assignments for a node. Each message contains
// either full list of tasks and secrets for the node, or an incremental update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	dctx, err := d.isRunningLocked()
	if err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).Assignments",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(stream.Context()).WithFields(fields)
	log.Debugf("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	var (
		sequence  int64
		appliesTo string
		initial   api.AssignmentsMessage
	)
	tasksMap := make(map[string]*api.Task)
	tasksUsingSecret := make(map[string]map[string]struct{})

	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
		sequence++
		msg.AppliesTo = appliesTo
		msg.ResultsIn = strconv.FormatInt(sequence, 10)
		appliesTo = msg.ResultsIn
		msg.Type = assignmentType

		if err := stream.Send(&msg); err != nil {
			return err
		}
		return nil
	}

	// returns a slice of new secrets to send down
	addSecretsForTask := func(readTx store.ReadTx, t *api.Task) []*api.Secret {
		container := t.Spec.GetContainer()
		if container == nil {
			return nil
		}
		var newSecrets []*api.Secret
		for _, secretRef := range container.Secrets {
			// Empty ID prefix will return all secrets. Bail if there is no SecretID
			if secretRef.SecretID == "" {
				log.Debugf("invalid secret reference")
				continue
			}
			secretID := secretRef.SecretID
			log := log.WithFields(logrus.Fields{
				"secret.id":   secretID,
				"secret.name": secretRef.SecretName,
			})

			if len(tasksUsingSecret[secretID]) == 0 {
				tasksUsingSecret[secretID] = make(map[string]struct{})

				secrets, err := store.FindSecrets(readTx, store.ByIDPrefix(secretID))
				if err != nil {
					log.WithError(err).Errorf("error retrieving secret")
					continue
				}
				if len(secrets) != 1 {
					log.Debugf("secret not found")
					continue
				}

				// If the secret was found and there was one result
				// (there should never be more than one because of the
				// uniqueness constraint), add this secret to our
				// initial set that we send down.
				newSecrets = append(newSecrets, secrets[0])
			}
			tasksUsingSecret[secretID][t.ID] = struct{}{}
		}

		return newSecrets
	}

	// TODO(aaronl): Also send node secrets that should be exposed to
	// this node.
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			// ... (the rest of this code is omitted here) ...
Example 7: markNodesUnknown
func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
	var nodes []*api.Node
	var err error
	d.store.View(func(tx store.ReadTx) {
		nodes, err = store.FindNodes(tx, store.All)
	})
	if err != nil {
		return errors.Wrap(err, "failed to get list of nodes")
	}
	_, err = d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			err := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, n.ID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					nodeCopy := node
					expireFunc := func() {
						if err := d.moveTasksToOrphaned(nodeCopy.ID); err != nil {
							log.WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
						}

						d.downNodes.Delete(nodeCopy.ID)
					}

					d.downNodes.Add(nodeCopy, expireFunc)
					return nil
				}

				node.Status.State = api.NodeStatus_UNKNOWN
				node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster`

				nodeID := node.ID

				expireFunc := func() {
					log := log.WithField("node", nodeID)
					log.Debugf("heartbeat expiration for unknown node")
					if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
						log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return errors.Wrap(err, `adding node in "unknown" state to node store failed`)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return errors.Wrap(err, "update failed")
				}
				return nil
			})
			if err != nil {
				log.WithField("node", n.ID).WithError(err).Errorf(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return err
}
Example 8: watch
func (s *session) watch(ctx context.Context) error {
	log := log.G(ctx).WithFields(logrus.Fields{"method": "(*session).watch"})
	log.Debugf("")

	var (
		resp            *api.AssignmentsMessage
		assignmentWatch api.Dispatcher_AssignmentsClient
		tasksWatch      api.Dispatcher_TasksClient
		streamReference string
		tasksFallback   bool
		err             error
	)

	client := api.NewDispatcherClient(s.conn)
	for {
		// If this is the first time we're running the loop, or there was a reference mismatch
		// attempt to get the assignmentWatch
		if assignmentWatch == nil && !tasksFallback {
			assignmentWatch, err = client.Assignments(ctx, &api.AssignmentsRequest{SessionID: s.sessionID})
			if err != nil {
				return err
			}
		}
		// We have an assignmentWatch, let's try to receive an AssignmentMessage
		if assignmentWatch != nil {
			// If we get a code = 12 desc = unknown method Assignments, try to use tasks
			resp, err = assignmentWatch.Recv()
			if err != nil {
				if grpc.Code(err) != codes.Unimplemented {
					return err
				}
				tasksFallback = true
				assignmentWatch = nil
				log.WithError(err).Infof("falling back to Tasks")
			}
		}

		// This code is here for backwards compatibility (so that newer clients can use the
		// older method Tasks)
		if tasksWatch == nil && tasksFallback {
			tasksWatch, err = client.Tasks(ctx, &api.TasksRequest{SessionID: s.sessionID})
			if err != nil {
				return err
			}
		}
		if tasksWatch != nil {
			// When falling back to Tasks because of an old manager, we wrap the tasks in assignments.
			var taskResp *api.TasksMessage
			var assignmentChanges []*api.AssignmentChange
			taskResp, err = tasksWatch.Recv()
			if err != nil {
				return err
			}
			for _, t := range taskResp.Tasks {
				taskChange := &api.AssignmentChange{
					Assignment: &api.Assignment{
						Item: &api.Assignment_Task{
							Task: t,
						},
					},
					Action: api.AssignmentChange_AssignmentActionUpdate,
				}

				assignmentChanges = append(assignmentChanges, taskChange)
			}
			resp = &api.AssignmentsMessage{Type: api.AssignmentsMessage_COMPLETE, Changes: assignmentChanges}
		}

		// If there seems to be a gap in the stream, let's break out of the inner for and
		// re-sync (by calling Assignments again). A standalone sketch of this check
		// follows this example.
		if streamReference != "" && streamReference != resp.AppliesTo {
			assignmentWatch = nil
		} else {
			streamReference = resp.ResultsIn
		}

		select {
		case s.assignments <- resp:
		case <-s.closed:
			return errSessionClosed
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
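The re-sync condition near the end of watch can be illustrated in isolation. The program below is a simplified stand-in: the message type and the state strings are invented for illustration, and only the AppliesTo/ResultsIn bookkeeping mirrors the example above.

package main

import "fmt"

// assignmentsMessage is a stand-in for api.AssignmentsMessage, carrying only
// the two fields the gap check needs.
type assignmentsMessage struct {
	AppliesTo string
	ResultsIn string
}

func main() {
	// Each message applies on top of the state named by AppliesTo and
	// produces the state named by ResultsIn.
	msgs := []assignmentsMessage{
		{AppliesTo: "", ResultsIn: "1"},  // initial COMPLETE message
		{AppliesTo: "1", ResultsIn: "2"}, // incremental update
		{AppliesTo: "5", ResultsIn: "6"}, // gap: state "5" was never seen
	}

	var streamReference string
	for _, resp := range msgs {
		if streamReference != "" && streamReference != resp.AppliesTo {
			// A gap in the stream: drop the watch and re-sync by requesting
			// a fresh Assignments stream, as the session code above does.
			fmt.Println("gap detected, re-syncing")
			break
		}
		streamReference = resp.ResultsIn
		fmt.Println("applied update, now at", streamReference)
	}
}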
Example 9: Assignments
// Assignments is a stream of assignments for a node. Each message contains
// either full list of tasks and secrets for the node, or an incremental update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
	nodeInfo, err := ca.RemoteNode(stream.Context())
	if err != nil {
		return err
	}
	nodeID := nodeInfo.NodeID

	if err := d.isRunningLocked(); err != nil {
		return err
	}

	fields := logrus.Fields{
		"node.id":      nodeID,
		"node.session": r.SessionID,
		"method":       "(*Dispatcher).Assignments",
	}
	if nodeInfo.ForwardedBy != nil {
		fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
	}
	log := log.G(stream.Context()).WithFields(fields)
	log.Debugf("")

	if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
		return err
	}

	var (
		sequence  int64
		appliesTo string
		initial   api.AssignmentsMessage
	)
	tasksMap := make(map[string]*api.Task)

	sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
		sequence++
		msg.AppliesTo = appliesTo
		msg.ResultsIn = strconv.FormatInt(sequence, 10)
		appliesTo = msg.ResultsIn
		msg.Type = assignmentType

		if err := stream.Send(&msg); err != nil {
			return err
		}
		return nil
	}

	// TODO(aaronl): Also send node secrets that should be exposed to
	// this node.
	nodeTasks, cancel, err := store.ViewAndWatch(
		d.store,
		func(readTx store.ReadTx) error {
			tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
			if err != nil {
				return err
			}

			for _, t := range tasks {
				// We only care about tasks that are ASSIGNED or
				// higher. If the state is below ASSIGNED, the
				// task may not meet the constraints for this
				// node, so we have to be careful about sending
				// secrets associated with it.
				if t.Status.State < api.TaskStateAssigned {
					continue
				}

				tasksMap[t.ID] = t
				initial.UpdateTasks = append(initial.UpdateTasks, t)
			}
			return nil
		},
		state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
		state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
			Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
	)
	if err != nil {
		return err
	}
	defer cancel()

	if err := sendMessage(initial, api.AssignmentsMessage_COMPLETE); err != nil {
		return err
	}

	for {
		// Check for session expiration
		if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
			return err
		}

		// bursty events should be processed in batches and sent out together
		var (
			update          api.AssignmentsMessage
			modificationCnt int
			batchingTimer   *time.Timer
			batchingTimeout <-chan time.Time
			updateTasks     = make(map[string]*api.Task)
			// ... (the rest of this code is omitted here) ...