This article collects typical usage examples of the Golang DefaultRaftConfig function from github.com/docker/swarmkit/manager/state/raft. If you are wondering what DefaultRaftConfig does, how to call it, or what real-world uses look like, the selected code examples below should help.
Three code examples of the DefaultRaftConfig function are shown below, sorted by popularity by default.
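Before the full examples, here is a minimal, self-contained sketch of the pattern they all share: obtain swarmkit's default raft configuration and then override individual fields before using it (for instance, in a cluster spec). The literal tick values below are arbitrary placeholders chosen for illustration, not defaults confirmed by swarmkit.

package main

import (
    "fmt"

    "github.com/docker/swarmkit/manager/state/raft"
)

func main() {
    // Start from the built-in defaults...
    cfg := raft.DefaultRaftConfig()

    // ...then override individual fields before storing the config, mirroring
    // what the examples below do with the ticks taken from the running raft
    // node. These literal values are placeholders for illustration only.
    cfg.ElectionTick = 10
    cfg.HeartbeatTick = 1

    fmt.Printf("raft config: %+v\n", cfg)
}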
Example 1: becomeLeader
// becomeLeader starts the subsystems that are run on the leader.
func (m *Manager) becomeLeader(ctx context.Context) {
    s := m.RaftNode.MemoryStore()
    rootCA := m.config.SecurityConfig.RootCA()
    nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()
    raftCfg := raft.DefaultRaftConfig()
    raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
    raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
    clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
    initialCAConfig := ca.DefaultCAConfig()
    initialCAConfig.ExternalCAs = m.config.ExternalCAs
    s.Update(func(tx store.Tx) error {
        // Add a default cluster object to the
        // store. Don't check the error because
        // we expect this to fail unless this
        // is a brand new cluster.
        store.CreateCluster(tx, defaultClusterObject(clusterID, initialCAConfig, raftCfg, rootCA))
        // Add Node entry for ourself, if one
        // doesn't exist already.
        store.CreateNode(tx, managerNode(nodeID))
        return nil
    })
    // Attempt to rotate the key-encrypting-key of the root CA key-material
    err := m.rotateRootCAKEK(ctx, clusterID)
    if err != nil {
        log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
    }
    m.replicatedOrchestrator = orchestrator.NewReplicatedOrchestrator(s)
    m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s)
    m.taskReaper = orchestrator.NewTaskReaper(s)
    m.scheduler = scheduler.New(s)
    m.keyManager = keymanager.New(s, keymanager.DefaultConfig())
    // TODO(stevvooe): Allocate a context that can be used to
    // shutdown underlying manager processes when leadership is
    // lost.
    m.allocator, err = allocator.New(s)
    if err != nil {
        log.G(ctx).WithError(err).Error("failed to create allocator")
        // TODO(stevvooe): It doesn't seem correct here to fail
        // creating the allocator but then use it anyway.
    }
    if m.keyManager != nil {
        go func(keyManager *keymanager.KeyManager) {
            if err := keyManager.Run(ctx); err != nil {
                log.G(ctx).WithError(err).Error("keymanager failed with an error")
            }
        }(m.keyManager)
    }
    go func(d *dispatcher.Dispatcher) {
        if err := d.Run(ctx); err != nil {
            log.G(ctx).WithError(err).Error("Dispatcher exited with an error")
        }
    }(m.Dispatcher)
    go func(server *ca.Server) {
        if err := server.Run(ctx); err != nil {
            log.G(ctx).WithError(err).Error("CA signer exited with an error")
        }
    }(m.caserver)
    // Start all sub-components in separate goroutines.
    // TODO(aluzzardi): This should have some kind of error handling so that
    // any component that goes down would bring the entire manager down.
    if m.allocator != nil {
        go func(allocator *allocator.Allocator) {
            if err := allocator.Run(ctx); err != nil {
                log.G(ctx).WithError(err).Error("allocator exited with an error")
            }
        }(m.allocator)
    }
    go func(scheduler *scheduler.Scheduler) {
        if err := scheduler.Run(ctx); err != nil {
            log.G(ctx).WithError(err).Error("scheduler exited with an error")
        }
    }(m.scheduler)
    go func(taskReaper *orchestrator.TaskReaper) {
        taskReaper.Run()
    }(m.taskReaper)
    go func(orchestrator *orchestrator.ReplicatedOrchestrator) {
        if err := orchestrator.Run(ctx); err != nil {
            log.G(ctx).WithError(err).Error("replicated orchestrator exited with an error")
        }
    }(m.replicatedOrchestrator)
    go func(globalOrchestrator *orchestrator.GlobalOrchestrator) {
        if err := globalOrchestrator.Run(ctx); err != nil {
            //......... the rest of the code is omitted here .........
Example 2: Run
// Run starts all manager sub-systems and the gRPC server at the configured
// address.
// The call never returns unless an error occurs or `Stop()` is called.
//
// TODO(aluzzardi): /!\ This function is *way* too complex. /!\
// It needs to be split into smaller manageable functions.
func (m *Manager) Run(parent context.Context) error {
    ctx, ctxCancel := context.WithCancel(parent)
    defer ctxCancel()
    // Harakiri.
    go func() {
        select {
        case <-ctx.Done():
        case <-m.stopped:
            ctxCancel()
        }
    }()
    leadershipCh, cancel := m.RaftNode.SubscribeLeadership()
    defer cancel()
    go func() {
        for leadershipEvent := range leadershipCh {
            // read out and discard all of the messages when we've stopped;
            // don't acquire the mutex yet. if stopped is closed, we don't need
            // to. this stops this loop from starving Run()'s attempt to Lock
            select {
            case <-m.stopped:
                continue
            default:
                // do nothing, we're not stopped
            }
            // we're not stopping so NOW acquire the mutex
            m.mu.Lock()
            newState := leadershipEvent.(raft.LeadershipState)
            if newState == raft.IsLeader {
                s := m.RaftNode.MemoryStore()
                rootCA := m.config.SecurityConfig.RootCA()
                nodeID := m.config.SecurityConfig.ClientTLSCreds.NodeID()
                raftCfg := raft.DefaultRaftConfig()
                raftCfg.ElectionTick = uint32(m.RaftNode.Config.ElectionTick)
                raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
                clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
                initialCAConfig := ca.DefaultCAConfig()
                initialCAConfig.ExternalCAs = m.config.ExternalCAs
                s.Update(func(tx store.Tx) error {
                    // Add a default cluster object to the
                    // store. Don't check the error because
                    // we expect this to fail unless this
                    // is a brand new cluster.
                    store.CreateCluster(tx, &api.Cluster{
                        ID: clusterID,
                        Spec: api.ClusterSpec{
                            Annotations: api.Annotations{
                                Name: store.DefaultClusterName,
                            },
                            Orchestration: api.OrchestrationConfig{
                                TaskHistoryRetentionLimit: defaultTaskHistoryRetentionLimit,
                            },
                            Dispatcher: api.DispatcherConfig{
                                HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
                            },
                            Raft:     raftCfg,
                            CAConfig: initialCAConfig,
                        },
                        RootCA: api.RootCA{
                            CAKey:      rootCA.Key,
                            CACert:     rootCA.Cert,
                            CACertHash: rootCA.Digest.String(),
                            JoinTokens: api.JoinTokens{
                                Worker:  ca.GenerateJoinToken(rootCA),
                                Manager: ca.GenerateJoinToken(rootCA),
                            },
                        },
                    })
                    // Add Node entry for ourself, if one
                    // doesn't exist already.
                    store.CreateNode(tx, &api.Node{
                        ID: nodeID,
                        Certificate: api.Certificate{
                            CN:   nodeID,
                            Role: api.NodeRoleManager,
                            Status: api.IssuanceStatus{
                                State: api.IssuanceStateIssued,
                            },
                        },
                        Spec: api.NodeSpec{
                            Role:       api.NodeRoleManager,
                            Membership: api.NodeMembershipAccepted,
                        },
                    })
                    return nil
                })
                //......... the rest of the code is omitted here .........
Example 3: TestRaftEncryptionKeyRotationWait
// This test rotates the encryption key and waits for the expected thing to happen
func TestRaftEncryptionKeyRotationWait(t *testing.T) {
    t.Parallel()
    nodes := make(map[uint64]*raftutils.TestNode)
    var clockSource *fakeclock.FakeClock
    raftConfig := raft.DefaultRaftConfig()
    nodes[1], clockSource = raftutils.NewInitNode(t, tc, &raftConfig)
    defer raftutils.TeardownCluster(t, nodes)
    nodeIDs := []string{"id1", "id2", "id3"}
    values := make([]*api.Node, len(nodeIDs))
    // Propose 3 values
    var err error
    for i, nodeID := range nodeIDs[:3] {
        values[i], err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, nodeID)
        require.NoError(t, err, "failed to propose value")
    }
    snapDir := filepath.Join(nodes[1].StateDir, "snap-v3-encrypted")
    startingKeys := nodes[1].KeyRotator.GetKeys()
    // rotate the encryption key
    nodes[1].KeyRotator.QueuePendingKey([]byte("key2"))
    nodes[1].KeyRotator.RotationNotify() <- struct{}{}
    // the rotation should trigger a snapshot, which should notify the rotator when it's done
    require.NoError(t, raftutils.PollFunc(clockSource, func() error {
        snapshots, err := storage.ListSnapshots(snapDir)
        if err != nil {
            return err
        }
        if len(snapshots) != 1 {
            return fmt.Errorf("expected 1 snapshot, found %d on new node", len(snapshots))
        }
        if nodes[1].KeyRotator.NeedsRotation() {
            return fmt.Errorf("rotation never finished")
        }
        return nil
    }))
    raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
    // Propose a 4th value
    nodeIDs = append(nodeIDs, "id4")
    v, err := raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id4")
    require.NoError(t, err, "failed to propose value")
    values = append(values, v)
    raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
    nodes[1].Server.Stop()
    nodes[1].ShutdownRaft()
    // Try to restart node 1. Without the new unlock key, it can't actually start
    n, ctx := raftutils.CopyNode(t, clockSource, nodes[1], false, raftutils.NewSimpleKeyRotator(startingKeys))
    require.Error(t, n.Node.JoinAndStart(ctx),
        "should not have been able to restart since we can't read snapshots")
    // with the right key, it can start, even if the right key is only the pending key
    newKeys := startingKeys
    newKeys.PendingDEK = []byte("key2")
    nodes[1].KeyRotator = raftutils.NewSimpleKeyRotator(newKeys)
    nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], false)
    raftutils.WaitForCluster(t, clockSource, nodes)
    // as soon as we joined, it should have finished rotating the key
    require.False(t, nodes[1].KeyRotator.NeedsRotation())
    raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
    // break snapshotting, and ensure that key rotation never finishes
    tempSnapDir := filepath.Join(nodes[1].StateDir, "snap-backup")
    require.NoError(t, os.Rename(snapDir, tempSnapDir))
    require.NoError(t, ioutil.WriteFile(snapDir, []byte("this is no longer a directory"), 0644))
    nodes[1].KeyRotator.QueuePendingKey([]byte("key3"))
    nodes[1].KeyRotator.RotationNotify() <- struct{}{}
    time.Sleep(250 * time.Millisecond)
    // rotation has not been finished, because we cannot take a snapshot
    require.True(t, nodes[1].KeyRotator.NeedsRotation())
    // Propose a 5th value, so we have WALs written with the new key
    nodeIDs = append(nodeIDs, "id5")
    v, err = raftutils.ProposeValue(t, nodes[1], DefaultProposalTime, "id5")
    require.NoError(t, err, "failed to propose value")
    values = append(values, v)
    raftutils.CheckValuesOnNodes(t, clockSource, nodes, nodeIDs, values)
    nodes[1].Server.Stop()
    nodes[1].ShutdownRaft()
    // restore the snapshot dir
    require.NoError(t, os.RemoveAll(snapDir))
    require.NoError(t, os.Rename(tempSnapDir, snapDir))
    // Now the wals are a mix of key2 and key3 - we can't actually start with either key
    singleKey := raft.EncryptionKeys{CurrentDEK: []byte("key2")}
    //......... the rest of the code is omitted here .........