This page collects typical usage examples of the New function from the Golang package github.com/docker/swarmkit/manager. If you are wondering what manager.New does and how to call it, the hand-picked examples below may help.
Five code examples of the New function are shown, ordered by popularity.
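All five examples follow the same pattern: build a manager.Config, call manager.New, run the returned manager on a background goroutine with Run, and later shut it down with Stop. The sketch below distills that pattern. It is a minimal sketch only, not taken from the examples: it assumes a swarmkit version whose Config has the RemoteAPI/ControlAPI fields used in examples 4 and 5, and the listen address and socket path are placeholders.

// startManager is a hypothetical helper (not from the examples below) showing
// the bare construct-then-Run pattern shared by all five examples.
func startManager(ctx context.Context, stateDir string, securityConfig *ca.SecurityConfig) (*manager.Manager, <-chan error, error) {
	m, err := manager.New(&manager.Config{
		RemoteAPI: manager.RemoteAddrs{
			ListenAddr: "127.0.0.1:0", // placeholder: any free local port
		},
		ControlAPI:     filepath.Join(stateDir, "control.sock"), // placeholder socket path
		StateDir:       stateDir,
		SecurityConfig: securityConfig,
	})
	if err != nil {
		return nil, nil, err
	}
	runErr := make(chan error, 1)
	go func() {
		// Run blocks until the manager stops; hand its error to the caller.
		runErr <- m.Run(ctx)
	}()
	return m, runErr, nil
}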
Example 1: newManager

// newManager starts a manager on an ephemeral local TCP port with a
// throwaway state directory, for use in tests.
func newManager(t *testing.T, joinAddr string, securityConfig *ca.SecurityConfig) (*testManager, error) {
	ltcp, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}

	stateDir, err := ioutil.TempDir("", "test-raft")
	if err != nil {
		return nil, err
	}

	m, err := manager.New(&manager.Config{
		ProtoListener:  map[string]net.Listener{"tcp": ltcp},
		StateDir:       stateDir,
		JoinRaft:       joinAddr,
		SecurityConfig: securityConfig,
	})
	if err != nil {
		return nil, err
	}
	go m.Run(context.Background())

	// Give the manager a moment to start serving before returning.
	time.Sleep(100 * time.Millisecond)

	return &testManager{
		m:    m,
		addr: ltcp.Addr().String(),
	}, nil
}
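For context, a hypothetical test (not part of the source page) could use this helper to bring up a two-manager cluster, joining the second manager to the first via its raft address. The Stop signature follows examples 2 and 3, and obtaining a *ca.SecurityConfig is assumed to happen elsewhere (swarmkit tests derive one from a throwaway test CA).

func TestTwoManagers(t *testing.T) {
	var securityConfig *ca.SecurityConfig // assumption: initialized from a test CA in a real test
	m1, err := newManager(t, "", securityConfig) // empty joinAddr: bootstrap a fresh cluster
	if err != nil {
		t.Fatal(err)
	}
	defer m1.m.Stop(context.Background())

	m2, err := newManager(t, m1.addr, securityConfig) // join the first manager's raft
	if err != nil {
		t.Fatal(err)
	}
	defer m2.m.Stop(context.Background())
}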
Example 2: runManager

// runManager loops: wait for the manager role, start a manager, then tear it
// down again when the node reverts to a worker or the manager itself fails.
func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}) error {
	for {
		if err := n.waitRole(ctx, ca.ManagerRole); err != nil {
			return err
		}

		remoteAddr, _ := n.remotes.Select(n.NodeID())
		m, err := manager.New(&manager.Config{
			ForceNewCluster: n.config.ForceNewCluster,
			ProtoAddr: map[string]string{
				"tcp":  n.config.ListenRemoteAPI,
				"unix": n.config.ListenControlAPI,
			},
			AdvertiseAddr:  n.config.AdvertiseRemoteAPI,
			SecurityConfig: securityConfig,
			ExternalCAs:    n.config.ExternalCAs,
			JoinRaft:       remoteAddr.Addr,
			StateDir:       n.config.StateDir,
			HeartbeatTick:  n.config.HeartbeatTick,
			ElectionTick:   n.config.ElectionTick,
		})
		if err != nil {
			return err
		}
		done := make(chan struct{})
		var runErr error
		go func() {
			runErr = m.Run(context.Background())
			close(done)
		}()

		n.Lock()
		n.manager = m
		n.Unlock()

		connCtx, connCancel := context.WithCancel(ctx)
		go n.initManagerConnection(connCtx, ready)

		// this happens only on initial start
		if ready != nil {
			go func(ready chan struct{}) {
				select {
				case <-ready:
					addr, err := n.RemoteAPIAddr()
					if err != nil {
						log.G(ctx).WithError(err).Errorf("get remote api addr")
					} else {
						n.remotes.Observe(api.Peer{NodeID: n.NodeID(), Addr: addr}, remotes.DefaultObservationWeight)
					}
				case <-connCtx.Done():
				}
			}(ready)
			ready = nil
		}

		roleChanged := make(chan error)
		waitCtx, waitCancel := context.WithCancel(ctx)
		go func() {
			err := n.waitRole(waitCtx, ca.WorkerRole)
			roleChanged <- err
		}()

		select {
		case <-done:
			// Fail out if m.Run() returns error, otherwise wait for
			// role change.
			if runErr != nil {
				err = runErr
			} else {
				err = <-roleChanged
			}
		case err = <-roleChanged:
		}

		n.Lock()
		n.manager = nil
		n.Unlock()

		select {
		case <-done:
		case <-ctx.Done():
			err = ctx.Err()
			m.Stop(context.Background())
			<-done
		}
		connCancel()
		n.setControlSocket(nil)
		waitCancel()

		if err != nil {
			return err
		}
	}
}
Example 3: runManager

// An earlier variant of the loop in example 2: the error from m.Run is not
// captured yet, and the non-manager role is still called ca.AgentRole.
func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}) error {
	for {
		n.waitRole(ctx, ca.ManagerRole)
		if ctx.Err() != nil {
			return ctx.Err()
		}
		remoteAddr, _ := n.remotes.Select(n.nodeID)
		m, err := manager.New(&manager.Config{
			ForceNewCluster: n.config.ForceNewCluster,
			ProtoAddr: map[string]string{
				"tcp":  n.config.ListenRemoteAPI,
				"unix": n.config.ListenControlAPI,
			},
			AdvertiseAddr:  n.config.AdvertiseRemoteAPI,
			SecurityConfig: securityConfig,
			ExternalCAs:    n.config.ExternalCAs,
			JoinRaft:       remoteAddr.Addr,
			StateDir:       n.config.StateDir,
			HeartbeatTick:  n.config.HeartbeatTick,
			ElectionTick:   n.config.ElectionTick,
		})
		if err != nil {
			return err
		}
		done := make(chan struct{})
		go func() {
			m.Run(context.Background()) // todo: store error
			close(done)
		}()

		n.Lock()
		n.manager = m
		n.Unlock()

		connCtx, connCancel := context.WithCancel(ctx)
		go n.initManagerConnection(connCtx, ready)

		// this happens only on initial start
		if ready != nil {
			go func(ready chan struct{}) {
				select {
				case <-ready:
					n.remotes.Observe(api.Peer{NodeID: n.nodeID, Addr: n.config.ListenRemoteAPI}, 5)
				case <-connCtx.Done():
				}
			}(ready)
			ready = nil
		}

		// Block until the role changes back to agent, then shut the manager down.
		n.waitRole(ctx, ca.AgentRole)

		n.Lock()
		n.manager = nil
		n.Unlock()

		select {
		case <-done:
		case <-ctx.Done():
			err = ctx.Err()
			m.Stop(context.Background())
			<-done
		}
		connCancel()

		if err != nil {
			return err
		}
	}
}
Example 4: runManager

// A later variant: the retry loop has moved to the caller, Config takes
// RemoteAPI/ControlAPI instead of ProtoAddr, and cleanup runs in a defer.
func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}) error {
	remoteAddr, _ := n.remotes.Select(n.NodeID())
	m, err := manager.New(&manager.Config{
		ForceNewCluster: n.config.ForceNewCluster,
		RemoteAPI: manager.RemoteAddrs{
			ListenAddr:    n.config.ListenRemoteAPI,
			AdvertiseAddr: n.config.AdvertiseRemoteAPI,
		},
		ControlAPI:       n.config.ListenControlAPI,
		SecurityConfig:   securityConfig,
		ExternalCAs:      n.config.ExternalCAs,
		JoinRaft:         remoteAddr.Addr,
		StateDir:         n.config.StateDir,
		HeartbeatTick:    n.config.HeartbeatTick,
		ElectionTick:     n.config.ElectionTick,
		AutoLockManagers: n.config.AutoLockManagers,
		UnlockKey:        n.unlockKey,
		Availability:     n.config.Availability,
	})
	if err != nil {
		return err
	}
	done := make(chan struct{})
	var runErr error
	go func() {
		// ErrMemberRemoved is an expected way for the manager to exit.
		if err := m.Run(context.Background()); err != nil && err != raft.ErrMemberRemoved {
			runErr = err
		}
		close(done)
	}()

	workerRole := make(chan struct{})
	waitRoleCtx, waitRoleCancel := context.WithCancel(ctx)
	defer waitRoleCancel()
	go func() {
		n.waitRole(waitRoleCtx, ca.WorkerRole)
		close(workerRole)
	}()

	defer func() {
		n.Lock()
		n.manager = nil
		n.Unlock()
		m.Stop(ctx)
		<-done
		n.setControlSocket(nil)
	}()

	n.Lock()
	n.manager = m
	n.Unlock()

	connCtx, connCancel := context.WithCancel(ctx)
	defer connCancel()
	go n.initManagerConnection(connCtx, ready)

	// this happens only on initial start
	if ready != nil {
		go func(ready chan struct{}) {
			select {
			case <-ready:
				addr, err := n.RemoteAPIAddr()
				if err != nil {
					log.G(ctx).WithError(err).Errorf("get remote api addr")
				} else {
					n.remotes.Observe(api.Peer{NodeID: n.NodeID(), Addr: addr}, remotes.DefaultObservationWeight)
				}
			case <-connCtx.Done():
			}
		}(ready)
	}

	// wait for manager stop or for role change
	// if manager stopped before role change, wait for new role for 16 seconds,
	// then just restart manager, we might just miss that event.
	// we need to wait for role to prevent manager to start again with wrong
	// certificate
	select {
	case <-done:
		timer := time.NewTimer(16 * time.Second)
		defer timer.Stop()
		select {
		case <-timer.C:
			log.G(ctx).Warn("failed to get worker role after manager stop, restart manager")
		case <-workerRole:
		case <-ctx.Done():
			return ctx.Err()
		}
		return runErr
	case <-workerRole:
		log.G(ctx).Info("role changed to worker, wait for manager to stop")
		select {
		case <-done:
			return runErr
		case <-ctx.Done():
			return ctx.Err()
		}
	case <-ctx.Done():
		return ctx.Err()
	//......... remainder of this example omitted .........
Example 5: runManager

// The most recent variant: the caller supplies the workerRole channel, Stop
// takes a clearData flag, and raft removal is observed via m.RemovedFromRaft().
func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}, workerRole <-chan struct{}) error {
	remoteAddr, _ := n.remotes.Select(n.NodeID())
	m, err := manager.New(&manager.Config{
		ForceNewCluster: n.config.ForceNewCluster,
		RemoteAPI: manager.RemoteAddrs{
			ListenAddr:    n.config.ListenRemoteAPI,
			AdvertiseAddr: n.config.AdvertiseRemoteAPI,
		},
		ControlAPI:       n.config.ListenControlAPI,
		SecurityConfig:   securityConfig,
		ExternalCAs:      n.config.ExternalCAs,
		JoinRaft:         remoteAddr.Addr,
		StateDir:         n.config.StateDir,
		HeartbeatTick:    n.config.HeartbeatTick,
		ElectionTick:     n.config.ElectionTick,
		AutoLockManagers: n.config.AutoLockManagers,
		UnlockKey:        n.unlockKey,
		Availability:     n.config.Availability,
		PluginGetter:     n.config.PluginGetter,
	})
	if err != nil {
		return err
	}
	done := make(chan struct{})
	var runErr error
	go func() {
		if err := m.Run(context.Background()); err != nil {
			runErr = err
		}
		close(done)
	}()

	var clearData bool
	defer func() {
		n.Lock()
		n.manager = nil
		n.Unlock()
		m.Stop(ctx, clearData)
		<-done
		n.setControlSocket(nil)
	}()

	n.Lock()
	n.manager = m
	n.Unlock()

	connCtx, connCancel := context.WithCancel(ctx)
	defer connCancel()
	go n.initManagerConnection(connCtx, ready)

	// wait for manager stop or for role change
	select {
	case <-done:
		return runErr
	case <-workerRole:
		log.G(ctx).Info("role changed to worker, stopping manager")
		clearData = true
	case <-m.RemovedFromRaft():
		log.G(ctx).Info("manager removed from raft cluster, stopping manager")
		clearData = true
	case <-ctx.Done():
		return ctx.Err()
	}
	return nil
}
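Finally, example 5 expects its caller to provide the workerRole channel that examples 2-4 built internally. A hypothetical supervisor (the name superviseManager is assumed, not from the source page) could wire it up the same way example 4 does, closing the channel once waitRole reports the node has become a worker:

// superviseManager is a hypothetical caller of the example 5 variant.
func (n *Node) superviseManager(ctx context.Context, securityConfig *ca.SecurityConfig, ready chan struct{}) error {
	workerRole := make(chan struct{})
	waitRoleCtx, waitRoleCancel := context.WithCancel(ctx)
	defer waitRoleCancel()
	go func() {
		// Close workerRole once this node's certificate is reissued as a worker
		// (waitRole returning an error means the context was canceled instead).
		if n.waitRole(waitRoleCtx, ca.WorkerRole) == nil {
			close(workerRole)
		}
	}()
	return n.runManager(ctx, securityConfig, ready, workerRole)
}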