This page collects typical usage examples of the WithLogger function from the Go package github.com/docker/swarmkit/log. If you are unsure what WithLogger does, how to call it, or what real-world uses look like, the curated examples below should help.
Fifteen code examples of WithLogger are shown, ordered by popularity.
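Before the examples, here is the pattern they all rely on, shown as a minimal, self-contained sketch. It is illustrative only (not taken from swarmkit itself), and the "request.id" field name is hypothetical: log.WithLogger stores a *logrus.Entry in a context.Context, and log.G retrieves it anywhere downstream, falling back to the package default log.L when no logger was attached.

package main

import (
    "context"

    "github.com/docker/swarmkit/log"
)

func handleRequest(ctx context.Context) {
    // log.G(ctx) retrieves whatever logger the caller attached, so the
    // "request.id" field appears here without being passed explicitly.
    log.G(ctx).Info("handling request")
}

func main() {
    ctx := context.Background()
    // Attach a logger carrying a request-scoped field to the context.
    ctx = log.WithLogger(ctx, log.G(ctx).WithField("request.id", "abc123"))
    handleRequest(ctx)
}

Every example below is a variation on this: attach identifying fields (a session ID, a module name, a task ID, a listener address) to the context once, and let log.G(ctx) pick them up in all downstream code.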
Example 1: run
func (s *session) run(ctx context.Context, delay time.Duration, description *api.NodeDescription) {
timer := time.NewTimer(delay) // delay before registering.
defer timer.Stop()
select {
case <-timer.C:
case <-ctx.Done():
return
}
if err := s.start(ctx, description); err != nil {
select {
case s.errs <- err:
case <-s.closed:
case <-ctx.Done():
}
return
}
ctx = log.WithLogger(ctx, log.G(ctx).WithField("session.id", s.sessionID))
go runctx(ctx, s.closed, s.errs, s.heartbeat)
go runctx(ctx, s.closed, s.errs, s.watch)
go runctx(ctx, s.closed, s.errs, s.listen)
go runctx(ctx, s.closed, s.errs, s.logSubscriptions)
close(s.registered)
}
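The four goroutines above are started through a runctx helper that is not shown on this page; it is unexported in the swarmkit agent package. A plausible sketch, assuming the wrapped methods (heartbeat, watch, listen, logSubscriptions) have the signature func(context.Context) error, is:

package sketch

import "context"

// runctx is a guess at the helper used above: it runs fn with the
// logger-carrying context and forwards a non-nil error, unless the session
// is already closed or the context has been cancelled.
func runctx(ctx context.Context, closed chan struct{}, errs chan error, fn func(ctx context.Context) error) {
    if err := fn(ctx); err != nil {
        select {
        case errs <- err:
        case <-closed:
        case <-ctx.Done():
        }
    }
}

Because ctx already carries the session.id field by the time these goroutines start, every log line written inside heartbeat, watch, and listen is tagged with the session automatically.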
Example 2: Init
// Init prepares the worker for assignments.
func (w *worker) Init(ctx context.Context) error {
w.mu.Lock()
defer w.mu.Unlock()
ctx = log.WithLogger(ctx, log.G(ctx).WithField("module", "worker"))
// TODO(stevvooe): Start task cleanup process.
// read the tasks from the database and start any task managers that may be needed.
return w.db.Update(func(tx *bolt.Tx) error {
return WalkTasks(tx, func(task *api.Task) error {
if !TaskAssigned(tx, task.ID) {
// NOTE(stevvooe): If tasks can survive worker restart, we need
// to startup the controller and ensure they are removed. For
// now, we can simply remove them from the database.
if err := DeleteTask(tx, task.ID); err != nil {
log.G(ctx).WithError(err).Errorf("error removing task %v", task.ID)
}
return nil
}
status, err := GetTaskStatus(tx, task.ID)
if err != nil {
log.G(ctx).WithError(err).Error("unable to read tasks status")
return nil
}
task.Status = *status // merges the status into the task, ensuring we start at the right point.
return w.startTask(ctx, tx, task)
})
})
}
Example 3: serveListener
// serveListener serves a listener for local and non local connections.
func (m *Manager) serveListener(ctx context.Context, errServe chan error, proto string, lis net.Listener) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
logrus.Fields{
"proto": lis.Addr().Network(),
"addr": lis.Addr().String()}))
if proto == "unix" {
log.G(ctx).Info("Listening for local connections")
// we need to disallow double closes because UnixListener.Close
// can delete unix-socket file of newer listener. grpc calls
// Close twice indeed: in Serve and in Stop.
errServe <- m.localserver.Serve(&closeOnceListener{Listener: lis})
} else {
log.G(ctx).Info("Listening for connections")
errServe <- m.server.Serve(lis)
}
}
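closeOnceListener is not a standard-library type; it is a small wrapper in the manager package whose job is exactly what the comment says: make Close idempotent, since grpc calls Close in both Serve and Stop and a second Close could otherwise remove the unix-socket file of a newer listener. A plausible sketch, assuming a sync.Once-based implementation:

package sketch

import (
    "net"
    "sync"
)

// closeOnceListener guarantees that the underlying listener's Close is
// invoked at most once, so a late second Close cannot delete the unix-socket
// file that a newer listener may have created in the meantime.
type closeOnceListener struct {
    net.Listener
    once     sync.Once
    closeErr error
}

func (l *closeOnceListener) Close() error {
    l.once.Do(func() {
        l.closeErr = l.Listener.Close()
    })
    return l.closeErr
}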
Example 4: AddManager
// AddManager adds a node with the Manager role (both agent and manager).
func (c *testCluster) AddManager() error {
// first node
var n *testNode
if len(c.nodes) == 0 {
node, err := newTestNode("", "")
if err != nil {
return err
}
n = node
} else {
joinAddr, err := c.RandomManager().node.RemoteAPIAddr()
if err != nil {
return err
}
clusterInfo, err := c.api.ListClusters(context.Background(), &api.ListClustersRequest{})
if err != nil {
return err
}
if len(clusterInfo.Clusters) == 0 {
return fmt.Errorf("joining manager: there is no cluster created in storage")
}
node, err := newTestNode(joinAddr, clusterInfo.Clusters[0].RootCA.JoinTokens.Manager)
if err != nil {
return err
}
n = node
}
c.counter++
ctx := log.WithLogger(c.ctx, log.L.WithField("testnode", c.counter))
c.wg.Add(1)
go func() {
c.errs <- n.node.Start(ctx)
c.wg.Done()
}()
select {
case <-n.node.Ready():
case <-time.After(opsTimeout):
return fmt.Errorf("node did not ready in time")
}
c.nodes[n.node.NodeID()] = n
c.nodesOrder[n.node.NodeID()] = c.counter
return nil
}
Example 5: buildTestEnv
func buildTestEnv(t *testing.T, task *api.Task) (context.Context, *MockController, func()) {
var (
ctx, cancel = context.WithCancel(context.Background())
mocks = gomock.NewController(t)
ctlr = NewMockController(mocks)
)
// Put test name into log messages. Awesome!
pc, _, _, ok := runtime.Caller(1)
if ok {
fn := runtime.FuncForPC(pc)
ctx = log.WithLogger(ctx, log.L.WithField("test", fn.Name()))
}
return ctx, ctlr, func() {
cancel()
mocks.Finish()
}
}
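The runtime.Caller(1) trick above resolves the name of the calling test function so that it is attached to every log line produced during that test. Extracted into a standalone helper, the same idea looks roughly like this (sketch only; withCallerField is a made-up name used for illustration):

package sketch

import (
    "context"
    "runtime"

    "github.com/docker/swarmkit/log"
)

// withCallerField tags the context's logger with the name of the function
// that called it, mirroring what buildTestEnv does for test names.
func withCallerField(ctx context.Context) context.Context {
    pc, _, _, ok := runtime.Caller(1)
    if !ok {
        return ctx
    }
    fn := runtime.FuncForPC(pc)
    if fn == nil {
        return ctx
    }
    return log.WithLogger(ctx, log.G(ctx).WithField("test", fn.Name()))
}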
Example 6: run
func (s *session) run(ctx context.Context, delay time.Duration) {
time.Sleep(delay) // delay before registering.
if err := s.start(ctx); err != nil {
select {
case s.errs <- err:
case <-s.closed:
case <-ctx.Done():
}
return
}
ctx = log.WithLogger(ctx, log.G(ctx).WithField("session.id", s.sessionID))
go runctx(ctx, s.closed, s.errs, s.heartbeat)
go runctx(ctx, s.closed, s.errs, s.watch)
go runctx(ctx, s.closed, s.errs, s.listen)
close(s.registered)
}
Example 7: StartNode
// StartNode starts a node from a stopped state.
func (c *testCluster) StartNode(id string) error {
n, ok := c.nodes[id]
if !ok {
return fmt.Errorf("set node role: node %s not found", id)
}
ctx := log.WithLogger(c.ctx, log.L.WithField("testnode", c.nodesOrder[id]))
errCtx, cancel := context.WithCancel(context.Background())
done := make(chan error)
defer cancel()
defer close(done)
c.wg.Add(2)
go func() {
c.errs <- n.node.Start(ctx)
c.wg.Done()
}()
go func(n *node.Node) {
err := n.Err(errCtx)
select {
case <-errCtx.Done():
default:
done <- err
}
c.wg.Done()
}(n.node)
select {
case <-n.node.Ready():
case err := <-done:
return err
case <-time.After(opsTimeout):
return fmt.Errorf("node did not ready in time")
}
if n.node.NodeID() != id {
return fmt.Errorf("restarted node does not have have the same ID")
}
return nil
}
Example 8: newTaskManager
func (w *worker) newTaskManager(ctx context.Context, tx *bolt.Tx, task *api.Task) (*taskManager, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithField("task.id", task.ID))
ctlr, status, err := exec.Resolve(ctx, task, w.executor)
if err := w.updateTaskStatus(ctx, tx, task.ID, status); err != nil {
log.G(ctx).WithError(err).Error("error updating task status after controller resolution")
}
if err != nil {
log.G(ctx).Error("controller resolution failed")
return nil, err
}
return newTaskManager(ctx, task, ctlr, statusReporterFunc(func(ctx context.Context, taskID string, status *api.TaskStatus) error {
w.mu.RLock()
defer w.mu.RUnlock()
return w.db.Update(func(tx *bolt.Tx) error {
return w.updateTaskStatus(ctx, tx, taskID, status)
})
})), nil
}
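statusReporterFunc is the function-adapter idiom, the same shape as http.HandlerFunc: a function type that satisfies an interface by calling itself, which is what lets the closure above be handed to newTaskManager as a reporter. The exact declarations are internal to the agent package and may differ; a plausible sketch:

package sketch

import (
    "context"

    "github.com/docker/swarmkit/api"
)

// StatusReporter is a sketch of the interface the worker reports through.
type StatusReporter interface {
    UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error
}

// statusReporterFunc lets a plain closure be used wherever a StatusReporter
// is expected, by implementing the interface with a call to itself.
type statusReporterFunc func(ctx context.Context, taskID string, status *api.TaskStatus) error

func (fn statusReporterFunc) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
    return fn(ctx, taskID, status)
}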
Example 9: serveListener
// serveListener serves a listener for local and non local connections.
func (m *Manager) serveListener(ctx context.Context, lCh <-chan net.Listener) {
var l net.Listener
select {
case l = <-lCh:
case <-ctx.Done():
return
}
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
logrus.Fields{
"proto": l.Addr().Network(),
"addr": l.Addr().String(),
}))
if _, ok := l.(*net.TCPListener); !ok {
log.G(ctx).Info("Listening for local connections")
// we need to disallow double closes because UnixListener.Close
// can delete unix-socket file of newer listener. grpc calls
// Close twice indeed: in Serve and in Stop.
m.errServe <- m.localserver.Serve(&closeOnceListener{Listener: l})
} else {
log.G(ctx).Info("Listening for connections")
m.errServe <- m.server.Serve(l)
}
}
Example 10: StartNode
// StartNode starts a node from a stopped state.
func (c *testCluster) StartNode(id string) error {
n, ok := c.nodes[id]
if !ok {
return fmt.Errorf("set node role: node %s not found", id)
}
ctx := log.WithLogger(c.ctx, log.L.WithField("testnode", c.nodesOrder[id]))
c.wg.Add(1)
go func() {
c.errs <- n.node.Start(ctx)
c.wg.Done()
}()
select {
case <-n.node.Ready():
case <-time.After(opsTimeout):
return fmt.Errorf("node did not ready in time")
}
if n.node.NodeID() != id {
return fmt.Errorf("restarted node does not have have the same ID")
}
return nil
}
Example 11: run
func (tm *taskManager) run(ctx context.Context) {
ctx, cancelAll := context.WithCancel(ctx)
defer cancelAll() // cancel all child operations on exit.
ctx = log.WithLogger(ctx, log.G(ctx).WithField("module", "taskmanager"))
var (
opctx context.Context
cancel context.CancelFunc
run = make(chan struct{}, 1)
statusq = make(chan *api.TaskStatus)
errs = make(chan error)
shutdown = tm.shutdown
updated bool // true if the task was updated.
)
defer func() {
// closure picks up current value of cancel.
if cancel != nil {
cancel()
}
}()
run <- struct{}{} // prime the pump
for {
select {
case <-run:
// always check for shutdown before running.
select {
case <-tm.shutdown:
continue // ignore run request and handle shutdown
case <-tm.closed:
continue
default:
}
opctx, cancel = context.WithCancel(ctx)
// Several variables need to be snapshotted for the closure below.
opcancel := cancel // fork for the closure
running := tm.task.Copy() // clone the task before dispatch
statusqLocal := statusq
updatedLocal := updated // capture state of update for goroutine
updated = false
go runctx(ctx, tm.closed, errs, func(ctx context.Context) error {
defer opcancel()
if updatedLocal {
// before we do anything, update the task for the controller.
// always update the controller before running.
if err := tm.ctlr.Update(opctx, running); err != nil {
log.G(ctx).WithError(err).Error("updating task controller failed")
return err
}
}
status, err := exec.Do(opctx, running, tm.ctlr)
if status != nil {
// always report the status if we get one back. This
// returns to the manager loop, then reports the status
// upstream.
select {
case statusqLocal <- status:
case <-ctx.Done(): // not opctx, since that may have been cancelled.
}
if err := tm.reporter.UpdateTaskStatus(ctx, running.ID, status); err != nil {
log.G(ctx).WithError(err).Error("failed reporting status to agent")
}
}
return err
})
case err := <-errs:
// This branch is always executed when an operation completes. The
// goal is to decide whether or not we re-dispatch the operation.
cancel = nil
select {
case <-tm.shutdown:
shutdown = tm.shutdown // re-enable the shutdown branch
continue // no dispatch if we are in shutdown.
default:
}
switch err {
case exec.ErrTaskNoop:
if !updated {
continue // wait till getting pumped via update.
}
case exec.ErrTaskRetry:
// TODO(stevvooe): Add exponential backoff with random jitter
// here. For now, this backoff is enough to keep the task
// manager from running away with the CPU.
time.AfterFunc(time.Second, func() {
errs <- nil // repump this branch, with no err
})
continue
case nil, context.Canceled, context.DeadlineExceeded:
// no log in this case
//.........some of the code has been omitted here.........
Example 12: reconcileTaskState
func reconcileTaskState(ctx context.Context, w *worker, assignments []*api.AssignmentChange, fullSnapshot bool) error {
var (
updatedTasks []*api.Task
removedTasks []*api.Task
)
for _, a := range assignments {
if t := a.Assignment.GetTask(); t != nil {
switch a.Action {
case api.AssignmentChange_AssignmentActionUpdate:
updatedTasks = append(updatedTasks, t)
case api.AssignmentChange_AssignmentActionRemove:
removedTasks = append(removedTasks, t)
}
}
}
log.G(ctx).WithFields(logrus.Fields{
"len(updatedTasks)": len(updatedTasks),
"len(removedTasks)": len(removedTasks),
}).Debug("(*worker).reconcileTaskState")
tx, err := w.db.Begin(true)
if err != nil {
log.G(ctx).WithError(err).Error("failed starting transaction against task database")
return err
}
defer tx.Rollback()
assigned := map[string]struct{}{}
for _, task := range updatedTasks {
log.G(ctx).WithFields(
logrus.Fields{
"task.id": task.ID,
"task.desiredstate": task.DesiredState}).Debug("assigned")
if err := PutTask(tx, task); err != nil {
return err
}
if err := SetTaskAssignment(tx, task.ID, true); err != nil {
return err
}
if mgr, ok := w.taskManagers[task.ID]; ok {
if err := mgr.Update(ctx, task); err != nil && err != ErrClosed {
log.G(ctx).WithError(err).Error("failed updating assigned task")
}
} else {
// we may have still seen the task, let's grab the status from
// storage and replace it with our status, if we have it.
status, err := GetTaskStatus(tx, task.ID)
if err != nil {
if err != errTaskUnknown {
return err
}
// never seen before, register the provided status
if err := PutTaskStatus(tx, task.ID, &task.Status); err != nil {
return err
}
} else {
task.Status = *status
}
w.startTask(ctx, tx, task)
}
assigned[task.ID] = struct{}{}
}
closeManager := func(tm *taskManager) {
// when a task is no longer assigned, we shutdown the task manager for
// it and leave cleanup to the sweeper.
if err := tm.Close(); err != nil {
log.G(ctx).WithError(err).Error("error closing task manager")
}
}
removeTaskAssignment := func(taskID string) error {
ctx := log.WithLogger(ctx, log.G(ctx).WithField("task.id", taskID))
if err := SetTaskAssignment(tx, taskID, false); err != nil {
log.G(ctx).WithError(err).Error("error setting task assignment in database")
}
return err
}
// If this was a complete set of assignments, we're going to remove all the remaining
// tasks.
if fullSnapshot {
for id, tm := range w.taskManagers {
if _, ok := assigned[id]; ok {
continue
}
err := removeTaskAssignment(id)
if err == nil {
delete(w.taskManagers, id)
go closeManager(tm)
}
}
} else {
//.........some of the code has been omitted here.........
Example 13: Run
//.........some of the code has been omitted here.........
authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.RaftNode, authorize)
authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.RaftNode, authorize)
proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
proxyCAAPI := api.NewRaftProxyCAServer(authenticatedCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
proxyNodeCAAPI := api.NewRaftProxyNodeCAServer(authenticatedNodeCAAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
proxyRaftMembershipAPI := api.NewRaftProxyRaftMembershipServer(authenticatedRaftMembershipAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
// localProxyControlAPI is a special kind of proxy. It is only wired up
// to receive requests from a trusted local socket, and these requests
// don't use TLS, therefore the requests it handles locally should
// bypass authorization. When it proxies, it sends them as requests from
// this manager rather than forwarded requests (it has no TLS
// information to put in the metadata map).
forwardAsOwnRequest := func(ctx context.Context) (context.Context, error) { return ctx, nil }
localProxyControlAPI := api.NewRaftProxyControlServer(baseControlAPI, controlAPIConnSelector, m.RaftNode, forwardAsOwnRequest)
// Everything registered on m.server should be an authenticated
// wrapper, or a proxy wrapping an authenticated wrapper!
api.RegisterCAServer(m.server, proxyCAAPI)
api.RegisterNodeCAServer(m.server, proxyNodeCAAPI)
api.RegisterRaftServer(m.server, authenticatedRaftAPI)
api.RegisterHealthServer(m.server, authenticatedHealthAPI)
api.RegisterRaftMembershipServer(m.server, proxyRaftMembershipAPI)
api.RegisterControlServer(m.localserver, localProxyControlAPI)
api.RegisterControlServer(m.server, authenticatedControlAPI)
api.RegisterDispatcherServer(m.server, proxyDispatcherAPI)
errServe := make(chan error, 2)
for proto, l := range m.listeners {
go func(proto string, lis net.Listener) {
ctx := log.WithLogger(ctx, log.G(ctx).WithFields(
logrus.Fields{
"proto": lis.Addr().Network(),
"addr": lis.Addr().String()}))
if proto == "unix" {
log.G(ctx).Info("Listening for local connections")
// we need to disallow double closes because UnixListener.Close
// can delete unix-socket file of newer listener. grpc calls
// Close twice indeed: in Serve and in Stop.
errServe <- m.localserver.Serve(&closeOnceListener{Listener: lis})
} else {
log.G(ctx).Info("Listening for connections")
errServe <- m.server.Serve(lis)
}
}(proto, l)
}
// Set the raft server as serving for the health server
healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)
if err := m.RaftNode.JoinAndStart(); err != nil {
for _, lis := range m.listeners {
lis.Close()
}
return fmt.Errorf("can't initialize raft node: %v", err)
}
close(m.started)
go func() {
err := m.RaftNode.Run(ctx)
if err != nil {
log.G(ctx).Error(err)
//.........some of the code has been omitted here.........
Example 14: Run
// Run is the main loop for a Raft node, it goes along the state machine,
// acting on the messages received from other Raft nodes in the cluster.
//
// Before running the main loop, it first starts the raft node based on saved
// cluster state. If no saved state exists, it starts a single-node cluster.
func (n *Node) Run(ctx context.Context) error {
ctx = log.WithLogger(ctx, logrus.WithField("raft_id", fmt.Sprintf("%x", n.Config.ID)))
ctx, cancel := context.WithCancel(ctx)
// nodeRemoved indicates that the node was stopped due to its removal.
nodeRemoved := false
defer func() {
cancel()
n.stop(ctx)
if nodeRemoved {
// Move WAL and snapshot out of the way, since
// they are no longer usable.
if err := n.moveWALAndSnap(); err != nil {
log.G(ctx).WithError(err).Error("failed to move wal after node removal")
}
}
n.done()
}()
wasLeader := false
for {
select {
case <-n.ticker.C():
n.raftNode.Tick()
n.cluster.Tick()
case rd := <-n.raftNode.Ready():
raftConfig := DefaultRaftConfig()
n.memoryStore.View(func(readTx store.ReadTx) {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err == nil && len(clusters) == 1 {
raftConfig = clusters[0].Spec.Raft
}
})
// Save entries to storage
if err := n.saveToStorage(&raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil {
log.G(ctx).WithError(err).Error("failed to save entries to storage")
}
if len(rd.Messages) != 0 {
// Send raft messages to peers
if err := n.send(ctx, rd.Messages); err != nil {
log.G(ctx).WithError(err).Error("failed to send message to members")
}
}
// Apply snapshot to memory store. The snapshot
// was applied to the raft store in
// saveToStorage.
if !raft.IsEmptySnap(rd.Snapshot) {
// Load the snapshot data into the store
if err := n.restoreFromSnapshot(rd.Snapshot.Data, false); err != nil {
log.G(ctx).WithError(err).Error("failed to restore from snapshot")
}
n.appliedIndex = rd.Snapshot.Metadata.Index
n.snapshotIndex = rd.Snapshot.Metadata.Index
n.confState = rd.Snapshot.Metadata.ConfState
}
// If we cease to be the leader, we must cancel any
// proposals that are currently waiting for a quorum to
// acknowledge them. It is still possible for these to
// become committed, but if that happens we will apply
// them as any follower would.
// It is important that we cancel these proposals before
// calling processCommitted, so processCommitted does
// not deadlock.
if rd.SoftState != nil {
if wasLeader && rd.SoftState.RaftState != raft.StateLeader {
wasLeader = false
if atomic.LoadUint32(&n.signalledLeadership) == 1 {
atomic.StoreUint32(&n.signalledLeadership, 0)
n.leadershipBroadcast.Publish(IsFollower)
}
// It is important that we set n.signalledLeadership to 0
// before calling n.wait.cancelAll. When a new raft
// request is registered, it checks n.signalledLeadership
// afterwards, and cancels the registration if it is 0.
// If cancelAll was called first, this call might run
// before the new request registers, but
// signalledLeadership would be set after the check.
// Setting signalledLeadership before calling cancelAll
// ensures that if a new request is registered during
// this transition, it will either be cancelled by
// cancelAll, or by its own check of signalledLeadership.
n.wait.cancelAll()
} else if !wasLeader && rd.SoftState.RaftState == raft.StateLeader {
wasLeader = true
}
}
//.........some of the code has been omitted here.........
Example 15: Run
// Run runs dispatcher tasks which should be run on leader dispatcher.
// The dispatcher can be stopped by cancelling ctx or by calling Stop().
func (d *Dispatcher) Run(ctx context.Context) error {
d.mu.Lock()
if d.isRunning() {
d.mu.Unlock()
return fmt.Errorf("dispatcher is already running")
}
logger := log.G(ctx).WithField("module", "dispatcher")
ctx = log.WithLogger(ctx, logger)
if err := d.markNodesUnknown(ctx); err != nil {
logger.Errorf(`failed to move all nodes to "unknown" state: %v`, err)
}
configWatcher, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err != nil {
return err
}
if err == nil && len(clusters) == 1 {
heartbeatPeriod, err := ptypes.Duration(clusters[0].Spec.Dispatcher.HeartbeatPeriod)
if err == nil && heartbeatPeriod > 0 {
d.config.HeartbeatPeriod = heartbeatPeriod
}
if clusters[0].NetworkBootstrapKeys != nil {
d.networkBootstrapKeys = clusters[0].NetworkBootstrapKeys
}
}
return nil
},
state.EventUpdateCluster{},
)
if err != nil {
d.mu.Unlock()
return err
}
defer cancel()
d.ctx, d.cancel = context.WithCancel(ctx)
d.mu.Unlock()
publishManagers := func() {
mgrs := getWeightedPeers(d.cluster)
sort.Sort(weightedPeerByNodeID(mgrs))
d.mu.Lock()
if reflect.DeepEqual(mgrs, d.lastSeenManagers) {
d.mu.Unlock()
return
}
d.lastSeenManagers = mgrs
d.mu.Unlock()
d.mgrQueue.Publish(mgrs)
}
publishManagers()
publishTicker := time.NewTicker(1 * time.Second)
defer publishTicker.Stop()
batchTimer := time.NewTimer(maxBatchInterval)
defer batchTimer.Stop()
for {
select {
case <-publishTicker.C:
publishManagers()
case <-d.processTaskUpdatesTrigger:
d.processTaskUpdates()
batchTimer.Reset(maxBatchInterval)
case <-batchTimer.C:
d.processTaskUpdates()
batchTimer.Reset(maxBatchInterval)
case v := <-configWatcher:
cluster := v.(state.EventUpdateCluster)
d.mu.Lock()
if cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod != nil {
// ignore error, since Spec has passed validation before
heartbeatPeriod, _ := ptypes.Duration(cluster.Cluster.Spec.Dispatcher.HeartbeatPeriod)
if heartbeatPeriod != d.config.HeartbeatPeriod {
// only call d.nodes.updatePeriod when heartbeatPeriod changes
d.config.HeartbeatPeriod = heartbeatPeriod
d.nodes.updatePeriod(d.config.HeartbeatPeriod, d.config.HeartbeatEpsilon, d.config.GracePeriodMultiplier)
}
}
d.networkBootstrapKeys = cluster.Cluster.NetworkBootstrapKeys
d.mu.Unlock()
d.keyMgrQueue.Publish(struct{}{})
case <-d.ctx.Done():
return nil
}
}
}
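All fifteen examples reduce to the same mechanism: the logger travels inside the context. For reference, the two functions can be re-implemented in a few lines, as sketched below. This mirrors the commonly described shape of the swarmkit log package, but treat the details (the unexported key type, the default entry, and the logrus import path, which in older swarmkit vendoring was github.com/Sirupsen/logrus) as assumptions rather than the verbatim source.

package sketch

import (
    "context"

    "github.com/sirupsen/logrus"
)

// loggerKey is the private context key under which the logger is stored.
type loggerKey struct{}

// L is the fallback entry returned when a context carries no logger.
var L = logrus.NewEntry(logrus.StandardLogger())

// WithLogger returns a new context that carries the given logger.
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
    return context.WithValue(ctx, loggerKey{}, logger)
}

// GetLogger retrieves the context's logger, falling back to L.
func GetLogger(ctx context.Context) *logrus.Entry {
    if logger, ok := ctx.Value(loggerKey{}).(*logrus.Entry); ok {
        return logger
    }
    return L
}

// G mirrors the shorthand used throughout the examples above.
var G = GetLogger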