本文整理汇总了Golang中github.com/hashicorp/raft.NewFileSnapshotStore函数的典型用法代码示例。如果您正苦于以下问题:Golang NewFileSnapshotStore函数的具体用法?Golang NewFileSnapshotStore怎么用?Golang NewFileSnapshotStore使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewFileSnapshotStore函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: newRaft
// newRaft initializes raft state under home: a bolt-backed log/stable store,
// a file snapshot store retaining 5 snapshots, and the raft node itself.
// It starts a goroutine waiting for leadership changes before returning.
// Returns an error if the directory, stores, or raft node cannot be created.
func newRaft(home, addr string, peerStore *peerStoreWrapper, fsm raft.FSM, trans *raft.NetworkTransport, cfg *raft.Config) (*Raft, error) {
	if err := os.MkdirAll(home, 0700); err != nil {
		return nil, err
	}
	db, err := raftboltdb.NewBoltStore(filepath.Join(home, "raft.db"))
	if err != nil {
		return nil, fmt.Errorf("error initializing raft db: %v", err)
	}
	// nil writer silences snapshot-store logging.
	snapStore, err := raft.NewFileSnapshotStore(filepath.Join(home, "snapshots"), 5, nil)
	if err != nil {
		// Fixed typo in the original error message ("intializing").
		return nil, fmt.Errorf("error initializing raft snap store: %v", err)
	}
	r, err := raft.NewRaft(cfg, fsm, db, db, snapStore, peerStore, trans)
	if err != nil {
		return nil, err
	}
	// Named rft (not raft) so the local does not shadow the raft package.
	rft := &Raft{
		r:          r,
		peers:      peerStore,
		trans:      trans,
		db:         db,
		shutdownCh: make(chan struct{}),
		pub:        pubsub.NewPublisher(defaultTimeout, 1),
	}
	go rft.waitLeader()
	return rft, nil
}
示例2: main
// main boots a single raft node from ./config.json, joins a hard-coded
// three-node cluster, and prints the current leader once per second.
func main() {
	buf, err := ioutil.ReadFile("./config.json")
	if err != nil {
		log.Fatal(err)
	}
	var v Config
	// BUG FIX: the original used v.DataDir before checking the Unmarshal
	// error; check it first so a malformed config fails fast.
	if err = json.Unmarshal(buf, &v); err != nil {
		log.Fatal(err)
	}
	dataDir := v.DataDir
	// BUG FIX: the original ignored the MkdirAll error.
	if err = os.MkdirAll(dataDir, 0755); err != nil {
		log.Fatal(err)
	}
	cfg := raft.DefaultConfig()
	// cfg.EnableSingleNode = true
	fsm := new(Word)
	fsm.words = "hahaha"
	dbStore, err := raftboltdb.NewBoltStore(path.Join(dataDir, "raft_db"))
	if err != nil {
		log.Fatal(err)
	}
	// Retain a single snapshot, logging snapshot activity to stdout.
	fileStore, err := raft.NewFileSnapshotStore(dataDir, 1, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	trans, err := raft.NewTCPTransport(v.Bind, nil, 3, 5*time.Second, os.Stdout)
	if err != nil {
		log.Fatal(err)
	}
	peers := make([]string, 0, 10)
	peers = raft.AddUniquePeer(peers, "192.168.78.151:12345")
	peers = raft.AddUniquePeer(peers, "192.168.78.151:12346")
	peers = raft.AddUniquePeer(peers, "192.168.78.151:12347")
	peerStore := raft.NewJSONPeers(dataDir, trans)
	peerStore.SetPeers(peers)
	r, err := raft.NewRaft(cfg, fsm, dbStore, dbStore, fileStore, peerStore, trans)
	// BUG FIX: the original never checked the error from NewRaft, which
	// would make the ticker loop below dereference a nil *raft.Raft.
	if err != nil {
		log.Fatal(err)
	}
	t := time.NewTicker(1 * time.Second)
	defer t.Stop()
	// Print the observed leader every second, forever.
	for range t.C {
		fmt.Println(r.Leader())
	}
}
示例3: Open
// Open opens the store. If enableSingle is set, and there are no existing peers,
// then this node becomes the first node, and therefore leader, of the cluster.
func (s *Store) Open(enableSingle bool) error {
	// Start from the library's default raft configuration.
	config := raft.DefaultConfig()

	// Load whatever peer set was persisted by a previous run.
	existing, err := readPeersJSON(filepath.Join(s.RaftDir, "peers.json"))
	if err != nil {
		return err
	}

	// A lone node may elect itself as leader, but only when single-node
	// mode was explicitly requested and no real cluster exists yet.
	if enableSingle && len(existing) <= 1 {
		s.logger.Println("enabling single-node mode")
		config.EnableSingleNode = true
		config.DisableBootstrapAfterElect = false
	}

	// Bring up the TCP transport used for raft RPCs.
	tcpAddr, err := net.ResolveTCPAddr("tcp", s.RaftBind)
	if err != nil {
		return err
	}
	trans, err := raft.NewTCPTransport(s.RaftBind, tcpAddr, 3, 10*time.Second, os.Stderr)
	if err != nil {
		return err
	}

	// Peer membership is persisted as JSON in the raft directory.
	jsonPeers := raft.NewJSONPeers(s.RaftDir, trans)

	// File snapshots let raft truncate its log.
	snapStore, err := raft.NewFileSnapshotStore(s.RaftDir, retainSnapshotCount, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// A single BoltDB file serves as both log store and stable store.
	boltStore, err := raftboltdb.NewBoltStore(filepath.Join(s.RaftDir, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}

	// Finally instantiate the raft node with the store itself as the FSM.
	node, err := raft.NewRaft(config, (*fsm)(s), boltStore, boltStore, snapStore, jsonPeers, trans)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	s.raft = node
	return nil
}
示例4: openRaft
// openRaft wires together and starts the raft node for the local store.
func (r *localRaft) openRaft() error {
	s := r.store

	// Copy the store's timing knobs onto a fresh raft configuration.
	cfg := raft.DefaultConfig()
	cfg.Logger = s.Logger
	cfg.HeartbeatTimeout = s.HeartbeatTimeout
	cfg.ElectionTimeout = s.ElectionTimeout
	cfg.LeaderLeaseTimeout = s.LeaderLeaseTimeout
	cfg.CommitTimeout = s.CommitTimeout

	// With no configured peers we run as a single-node cluster, making
	// sure our own address appears in the peer list.
	cfg.EnableSingleNode = len(s.peers) == 0
	if cfg.EnableSingleNode {
		s.peers = append(s.peers, s.Addr.String())
	}

	// Multiplex raft traffic over the shared listener.
	r.raftLayer = newRaftLayer(s.RaftListener, s.Addr)
	r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, os.Stderr)

	// Peer membership is persisted as JSON under the data path.
	r.peerStore = raft.NewJSONPeers(s.path, r.transport)

	// A single BoltDB file backs both the log store and the stable store.
	boltStore, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}
	r.raftStore = boltStore

	// File snapshots let raft truncate its log.
	snaps, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// Bring up the raft node with the store acting as the FSM.
	node, err := raft.NewRaft(cfg, (*storeFSM)(s), boltStore, boltStore, snaps, r.peerStore, r.transport)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	r.raft = node
	return nil
}
示例5: makeRaft
// makeRaft returns a Raft and its FSM, with snapshots based in the given dir.
// It bootstraps a single-voter in-memory cluster and blocks (up to 10s) until
// the node elects itself leader, failing the test on any error or timeout.
func makeRaft(t *testing.T, dir string) (*raft.Raft, *MockFSM) {
	snaps, err := raft.NewFileSnapshotStore(dir, 5, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	fsm := &MockFSM{}
	store := raft.NewInmemStore()
	addr, trans := raft.NewInmemTransport("")
	config := raft.DefaultConfig()
	config.LocalID = raft.ServerID(fmt.Sprintf("server-%s", addr))

	// Bootstrap a one-voter configuration containing only this node.
	var members raft.Configuration
	members.Servers = append(members.Servers, raft.Server{
		Suffrage: raft.Voter,
		ID:       config.LocalID,
		Address:  addr,
	})
	err = raft.BootstrapCluster(config, store, store, snaps, trans, members)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// BUG FIX: the original named this local "raft", shadowing the raft
	// package for the remainder of the function.
	r, err := raft.NewRaft(config, fsm, store, store, snaps, trans)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Wait (bounded) for the node to become leader.
	timeout := time.After(10 * time.Second)
	for {
		if r.Leader() != "" {
			break
		}
		select {
		case <-r.LeaderCh():
		case <-time.After(1 * time.Second):
			// Need to poll because we might have missed the first
			// go with the leader channel.
		case <-timeout:
			t.Fatalf("timed out waiting for leader")
		}
	}
	return r, fsm
}
示例6: NewRaft
// NewRaft builds a raft instance backed by leveldb log/meta stores and a
// file snapshot store rooted at cfg.DataDir. The config is validated before
// the node is created.
func NewRaft(cfg *config.Raft, fsm raft.FSM, trans raft.Transport) (*raft.Raft, error) {
	logDir := filepath.Join(cfg.DataDir, "log")
	metaDir := filepath.Join(cfg.DataDir, "meta")

	logs, err := raftleveldb.NewStore(logDir)
	if err != nil {
		return nil, err
	}
	meta, err := raftleveldb.NewStore(metaDir)
	if err != nil {
		return nil, err
	}

	// Retain the three most recent snapshots; store activity logs to stderr.
	snaps, err := raft.NewFileSnapshotStore(cfg.DataDir, 3, os.Stderr)
	if err != nil {
		return nil, err
	}

	// Peers persist as JSON in the data directory.
	peers := raft.NewJSONPeers(cfg.DataDir, trans)

	// Apply the caller's snapshot/bootstrap settings onto the defaults and
	// validate before constructing the node.
	rc := raft.DefaultConfig()
	rc.SnapshotInterval = time.Duration(cfg.SnapshotInterval)
	rc.SnapshotThreshold = cfg.SnapshotThreshold
	rc.EnableSingleNode = cfg.EnableSingleNode
	if err = raft.ValidateConfig(rc); err != nil {
		return nil, err
	}

	return raft.NewRaft(rc, fsm, logs, meta, snaps, peers, trans)
}
示例7: setupRaft
// setupRaft is used to setup and initialize Raft
func (c *cerebrum) setupRaft() error {
// If we are in bootstrap mode, enable a single node cluster
if c.config.Bootstrap {
c.config.RaftConfig.EnableSingleNode = true
}
// Create the base state path
statePath := filepath.Join(c.config.DataPath, tmpStatePath)
if err := os.RemoveAll(statePath); err != nil {
return err
}
if err := os.MkdirAll(filepath.Dir(statePath), 0755); err != nil {
return err
}
// Create the base raft path
path := filepath.Join(c.config.DataPath, RaftStateDir)
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
// Create the backend raft store for logs and stable storage
store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
if err != nil {
return err
}
c.raftStore = store
// Wrap the store in a LogCache to improve performance
cacheStore, err := raft.NewLogCache(c.config.LogCacheSize, store)
if err != nil {
store.Close()
return err
}
// Create the snapshot store
snapshots, err := raft.NewFileSnapshotStore(path, c.config.SnapshotsRetained, c.config.LogOutput)
if err != nil {
store.Close()
return err
}
// Try to bind
addr, err := net.ResolveTCPAddr("tcp", c.config.RaftBindAddr)
if err != nil {
return err
}
// Start TCP listener
listener, err := net.ListenTCP("tcp", addr)
if err != nil {
return err
}
// Create connection layer and transport
layer := NewRaftLayer(c.dialer, listener.Addr(), c.config.TLSConfig)
c.raftTransport = raft.NewNetworkTransport(layer, 3, 10*time.Second, c.config.LogOutput)
// Create TLS connection dispatcher
dispatcher := yamuxer.NewDispatcher(log.NewLogger(c.config.LogOutput, "dispatcher"), nil)
dispatcher.Register(connRaft, layer)
dispatcher.Register(connForward, &ForwardingHandler{c.applier, log.NewLogger(c.config.LogOutput, "forwarder")})
// Create TLS connection muxer
c.muxer = yamuxer.New(c.context, &yamuxer.Config{
Listener: listener,
TLSConfig: c.config.TLSConfig,
Deadline: c.config.ConnectionDeadline,
LogOutput: c.config.LogOutput,
Dispatcher: dispatcher,
})
// Setup the peer store
c.raftPeers = raft.NewJSONPeers(path, c.raftTransport)
// Ensure local host is always included if we are in bootstrap mode
if c.config.Bootstrap {
peers, err := c.raftPeers.Peers()
if err != nil {
store.Close()
return err
}
if !raft.PeerContained(peers, c.raftTransport.LocalAddr()) {
c.raftPeers.SetPeers(raft.AddUniquePeer(peers, c.raftTransport.LocalAddr()))
}
}
// Make sure we set the LogOutput
c.config.RaftConfig.LogOutput = c.config.LogOutput
// Setup the Raft store
c.raft, err = raft.NewRaft(c.config.RaftConfig, c.fsm, cacheStore, store,
snapshots, c.raftPeers, c.raftTransport)
if err != nil {
store.Close()
c.raftTransport.Close()
return err
}
//.........这里部分代码省略.........
示例8: Open
// Open opens the store. If enableSingle is set, and there are no existing peers,
// then this node becomes the first node, and therefore leader, of the cluster.
func (s *Store) Open(enableSingle bool) error {
	if err := os.MkdirAll(s.raftDir, 0755); err != nil {
		return err
	}

	// Open the SQLite database. A file-backed database is removed first,
	// as it will be rebuilt from (possibly) a snapshot and committed log
	// entries.
	var db *sql.DB
	var err error
	if s.dbConf.Memory {
		db, err = sql.OpenInMemoryWithDSN(s.dbConf.DSN)
		if err != nil {
			return err
		}
		s.logger.Println("SQLite in-memory database opened")
	} else {
		if err := os.Remove(s.dbPath); err != nil && !os.IsNotExist(err) {
			return err
		}
		db, err = sql.OpenWithDSN(s.dbPath, s.dbConf.DSN)
		if err != nil {
			return err
		}
		s.logger.Println("SQLite database opened at", s.dbPath)
	}
	s.db = db

	// Start from the default raft configuration.
	config := raft.DefaultConfig()

	// Load any peers persisted by a previous run; a join is required when
	// there is no established cluster.
	existing, err := readPeersJSON(filepath.Join(s.raftDir, "peers.json"))
	if err != nil {
		return err
	}
	s.joinRequired = len(existing) <= 1

	// A lone node may elect itself as leader, but only when explicitly
	// enabled and there is no pre-existing multi-node cluster.
	if enableSingle && len(existing) <= 1 {
		s.logger.Println("enabling single-node mode")
		config.EnableSingleNode = true
		config.DisableBootstrapAfterElect = false
	}

	// Raft communication runs over the store's transport layer.
	trans := raft.NewNetworkTransport(s.raftTransport, 3, 10*time.Second, os.Stderr)

	// Peer membership is stored as JSON in the raft directory.
	jsonPeers := raft.NewJSONPeers(s.raftDir, trans)

	// File snapshots allow raft to truncate the log.
	snapStore, err := raft.NewFileSnapshotStore(s.raftDir, retainSnapshotCount, os.Stderr)
	if err != nil {
		return fmt.Errorf("file snapshot store: %s", err)
	}

	// BoltDB acts as both the log store and the stable store.
	boltStore, err := raftboltdb.NewBoltStore(filepath.Join(s.raftDir, "raft.db"))
	if err != nil {
		return fmt.Errorf("new bolt store: %s", err)
	}

	// Instantiate the raft node with the store itself as the FSM.
	node, err := raft.NewRaft(config, s, boltStore, boltStore, snapStore, jsonPeers, trans)
	if err != nil {
		return fmt.Errorf("new raft: %s", err)
	}
	s.raft = node
	return nil
}
示例9: setupRaft
// setupRaft is used to setup and initialize Raft: it builds the FSM, the
// MDB-backed log/stable store (the log side wrapped in a LogCache), the file
// snapshot store, the network transport and the JSON peer store, then starts
// the leadership monitor goroutine. On any failure after the MDB store is
// opened, the store is closed before returning the error.
func (s *Server) setupRaft() error {
// If we are in bootstrap mode, enable a single node cluster
if s.config.Bootstrap {
s.config.RaftConfig.EnableSingleNode = true
}
// Create the base state path, wiping any temporary state left over from a
// previous run before recreating the directory.
statePath := filepath.Join(s.config.DataDir, tmpStatePath)
if err := os.RemoveAll(statePath); err != nil {
return err
}
if err := ensurePath(statePath, true); err != nil {
return err
}
// Create the FSM
var err error
s.fsm, err = NewFSM(s.tombstoneGC, statePath, s.config.LogOutput)
if err != nil {
return err
}
// Set the maximum raft size based on 32/64bit. Since we are
// doing an mmap underneath, we need to limit our use of virtual
// address space on 32bit, but don't have to care on 64bit.
dbSize := raftDBSize32bit
if runtime.GOARCH == "amd64" {
dbSize = raftDBSize64bit
}
// Create the base raft path
path := filepath.Join(s.config.DataDir, raftState)
if err := ensurePath(path, true); err != nil {
return err
}
// Create the MDB store for logs and stable storage
store, err := raftmdb.NewMDBStoreWithSize(path, dbSize)
if err != nil {
return err
}
s.raftStore = store
// Wrap the store in a LogCache to improve performance
cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
if err != nil {
store.Close()
return err
}
// Create the snapshot store; file snapshots let raft truncate its log.
snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
if err != nil {
store.Close()
return err
}
// Create a transport layer
trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
s.raftTransport = trans
// Setup the peer store
s.raftPeers = raft.NewJSONPeers(path, trans)
// Ensure local host is always included if we are in bootstrap mode
if s.config.Bootstrap {
peers, err := s.raftPeers.Peers()
if err != nil {
store.Close()
return err
}
if !raft.PeerContained(peers, trans.LocalAddr()) {
s.raftPeers.SetPeers(raft.AddUniquePeer(peers, trans.LocalAddr()))
}
}
// Make sure we set the LogOutput
s.config.RaftConfig.LogOutput = s.config.LogOutput
// Setup the Raft store. Note: the log store argument is the cached wrapper,
// while the stable store argument is the raw MDB store.
s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, cacheStore, store,
snapshots, s.raftPeers, trans)
if err != nil {
store.Close()
trans.Close()
return err
}
// Start monitoring leadership
go s.monitorLeadership()
return nil
}
示例10: newRaft
// newRaft builds the raft-backed Cluster for the app. It returns (nil, nil)
// when no raft address is configured, meaning clustering is disabled.
// Depending on ClusterState it either installs the configured peer list
// fresh, or merges it into the peers persisted from a previous run.
func newRaft(a *App) (Cluster, error) {
	r := new(Raft)
	if len(a.config.Raft.Addr) == 0 {
		return nil, nil
	}

	// Resolve our own address plus every configured cluster member into a
	// de-duplicated peer list.
	peers := make([]string, 0, len(a.config.Raft.Cluster))
	r.raftAddr = a.config.Raft.Addr
	addr, err := net.ResolveTCPAddr("tcp", r.raftAddr)
	if err != nil {
		return nil, fmt.Errorf("invalid raft addr format %s, must host:port, err:%v", r.raftAddr, err)
	}
	peers = raft.AddUniquePeer(peers, addr.String())
	for _, cluster := range a.config.Raft.Cluster {
		addr, err = net.ResolveTCPAddr("tcp", cluster)
		if err != nil {
			return nil, fmt.Errorf("invalid cluster format %s, must host:port, err:%v", cluster, err)
		}
		peers = raft.AddUniquePeer(peers, addr.String())
	}

	// BUG FIX: the original discarded both MkdirAll errors; surface them so
	// a bad data/log dir fails fast instead of erroring later downstream.
	if err = os.MkdirAll(a.config.Raft.DataDir, 0755); err != nil {
		return nil, err
	}

	cfg := raft.DefaultConfig()
	if len(a.config.Raft.LogDir) == 0 {
		r.log = os.Stdout
	} else {
		if err = os.MkdirAll(a.config.Raft.LogDir, 0755); err != nil {
			return nil, err
		}
		logFile := path.Join(a.config.Raft.LogDir, "raft.log")
		f, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0644)
		if err != nil {
			return nil, err
		}
		// The file stays open for the life of the process; raft logs here.
		r.log = f
		cfg.LogOutput = r.log
	}

	raftDBPath := path.Join(a.config.Raft.DataDir, "raft_db")
	r.dbStore, err = raftboltdb.NewBoltStore(raftDBPath)
	if err != nil {
		return nil, err
	}
	// Retain a single snapshot; snapshot-store messages go to the raft log.
	fileStore, err := raft.NewFileSnapshotStore(a.config.Raft.DataDir, 1, r.log)
	if err != nil {
		return nil, err
	}
	r.trans, err = raft.NewTCPTransport(r.raftAddr, nil, 3, 5*time.Second, r.log)
	if err != nil {
		return nil, err
	}

	r.peerStore = raft.NewJSONPeers(a.config.Raft.DataDir, r.trans)
	if a.config.Raft.ClusterState == ClusterStateNew {
		log.Printf("[INFO] cluster state is new, use new cluster config")
		r.peerStore.SetPeers(peers)
	} else {
		log.Printf("[INFO] cluster state is existing, use previous + new cluster config")
		ps, err := r.peerStore.Peers()
		if err != nil {
			log.Printf("[INFO] get store peers error %v", err)
			return nil, err
		}
		for _, peer := range peers {
			ps = raft.AddUniquePeer(ps, peer)
		}
		r.peerStore.SetPeers(ps)
	}

	if peers, _ := r.peerStore.Peers(); len(peers) <= 1 {
		cfg.EnableSingleNode = true
		log.Println("[INFO] raft will run in single node mode, may only be used in test")
	}

	r.r, err = raft.NewRaft(cfg, a.fsm, r.dbStore, r.dbStore, fileStore, r.peerStore, r.trans)
	return r, err
}
示例11: TestCompaction
// TestCompaction does a full snapshot, persists it to disk, restores it and
// makes sure the state matches expectations. The other test functions directly
// test what should be compacted.
func TestCompaction(t *testing.T) {
ircServer = ircserver.NewIRCServer("", "testnetwork", time.Now())
tempdir, err := ioutil.TempDir("", "robust-test-")
if err != nil {
t.Fatalf("ioutil.TempDir: %v", err)
}
defer os.RemoveAll(tempdir)
flag.Set("raftdir", tempdir)
logstore, err := raft_store.NewLevelDBStore(filepath.Join(tempdir, "raftlog"), false)
if err != nil {
t.Fatalf("Unexpected error in NewLevelDBStore: %v", err)
}
ircstore, err := raft_store.NewLevelDBStore(filepath.Join(tempdir, "irclog"), false)
if err != nil {
t.Fatalf("Unexpected error in NewLevelDBStore: %v", err)
}
fsm := FSM{logstore, ircstore}
var logs []*raft.Log
logs = appendLog(logs, `{"Id": {"Id": 1}, "Type": 0, "Data": "auth"}`)
logs = appendLog(logs, `{"Id": {"Id": 2}, "Session": {"Id": 1}, "Type": 2, "Data": "NICK sECuRE"}`)
logs = appendLog(logs, `{"Id": {"Id": 3}, "Session": {"Id": 1}, "Type": 2, "Data": "USER blah 0 * :Michael Stapelberg"}`)
logs = appendLog(logs, `{"Id": {"Id": 4}, "Session": {"Id": 1}, "Type": 2, "Data": "NICK secure_"}`)
logs = appendLog(logs, `{"Id": {"Id": 5}, "Session": {"Id": 1}, "Type": 2, "Data": "JOIN #chaos-hd"}`)
logs = appendLog(logs, `{"Id": {"Id": 6}, "Session": {"Id": 1}, "Type": 2, "Data": "JOIN #i3"}`)
logs = appendLog(logs, `{"Id": {"Id": 7}, "Session": {"Id": 1}, "Type": 2, "Data": "PRIVMSG #chaos-hd :heya"}`)
logs = appendLog(logs, `{"Id": {"Id": 8}, "Session": {"Id": 1}, "Type": 2, "Data": "PRIVMSG #chaos-hd :newer message"}`)
logs = appendLog(logs, `{"Id": {"Id": 9}, "Session": {"Id": 1}, "Type": 2, "Data": "PART #i3"}`)
// These messages are too new to be compacted.
nowID := time.Now().UnixNano()
logs = appendLog(logs, `{"Id": {"Id": `+strconv.FormatInt(nowID, 10)+`}, "Session": {"Id": 1}, "Type": 2, "Data": "PART #chaos-hd"}`)
nowID++
logs = appendLog(logs, `{"Id": {"Id": `+strconv.FormatInt(nowID, 10)+`}, "Session": {"Id": 1}, "Type": 2, "Data": "JOIN #chaos-hd"}`)
if err := logstore.StoreLogs(logs); err != nil {
t.Fatalf("Unexpected error in store.StoreLogs: %v", err)
}
for _, log := range logs {
fsm.Apply(log)
}
verifyEndState(t)
snapshot, err := fsm.Snapshot()
if err != nil {
t.Fatalf("Unexpected error in fsm.Snapshot(): %v", err)
}
robustsnap, ok := snapshot.(*robustSnapshot)
if !ok {
t.Fatalf("fsm.Snapshot() return value is not a robustSnapshot")
}
if robustsnap.lastIndex != uint64(len(logs)) {
t.Fatalf("snapshot does not retain the last message, got: %d, want: %d", robustsnap.lastIndex, len(logs))
}
fss, err := raft.NewFileSnapshotStore(tempdir, 5, nil)
if err != nil {
t.Fatalf("%v", err)
}
sink, err := fss.Create(uint64(len(logs)), 1, []byte{})
if err != nil {
t.Fatalf("fss.Create: %v", err)
}
if err := snapshot.Persist(sink); err != nil {
t.Fatalf("Unexpected error in snapshot.Persist(): %v", err)
}
snapshots, err := fss.List()
if err != nil {
t.Fatalf("fss.List(): %v", err)
}
if len(snapshots) != 1 {
t.Fatalf("len(snapshots): got %d, want 1", len(snapshots))
}
_, readcloser, err := fss.Open(snapshots[0].ID)
if err != nil {
t.Fatalf("fss.Open(%s): %v", snapshots[0].ID, err)
}
if err := fsm.Restore(readcloser); err != nil {
t.Fatalf("fsm.Restore(): %v", err)
}
first, _ := ircstore.FirstIndex()
last, _ := ircstore.LastIndex()
if last-first >= uint64(len(logs)) {
t.Fatalf("Compaction did not decrease log size. got: %d, want: < %d", last-first, len(logs))
}
//.........这里部分代码省略.........
示例12: Open
// Open starts the raft consensus and opens the store. It holds s.mu for the
// duration except around raft.NewRaft (see the note below), and spawns the
// leadership-monitor and instance-expiry goroutines on success.
func (s *Store) Open() error {
s.mu.Lock()
defer s.mu.Unlock()
// Set up logging.
s.logger = log.New(s.LogOutput, "[discoverd] ", log.LstdFlags)
// Require listener & advertise address.
if s.Listener == nil {
return ErrListenerRequired
} else if s.Advertise == nil {
return ErrAdvertiseRequired
}
// Create root directory.
if err := os.MkdirAll(s.path, 0777); err != nil {
return err
}
// Create raft configuration from the store's timing settings.
config := raft.DefaultConfig()
config.HeartbeatTimeout = s.HeartbeatTimeout
config.ElectionTimeout = s.ElectionTimeout
config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
config.CommitTimeout = s.CommitTimeout
config.LogOutput = s.LogOutput
config.EnableSingleNode = s.EnableSingleNode
config.ShutdownOnRemove = false
// Create multiplexing transport layer.
raftLayer := newRaftLayer(s.Listener, s.Advertise)
// Begin listening to TCP port.
s.transport = raft.NewNetworkTransport(raftLayer, 3, 10*time.Second, os.Stderr)
// Setup storage layers: JSON peers plus a BoltDB-backed stable store.
s.peerStore = raft.NewJSONPeers(s.path, s.transport)
stableStore, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
if err != nil {
return fmt.Errorf("stable store: %s", err)
}
s.stableStore = stableStore
// Wrap the store in a LogCache to improve performance
cacheStore, err := raft.NewLogCache(512, stableStore)
if err != nil {
stableStore.Close()
return fmt.Errorf("log cache: %s", err)
}
// Create the snapshot store, retaining the two most recent snapshots.
ss, err := raft.NewFileSnapshotStore(s.path, 2, os.Stderr)
if err != nil {
return fmt.Errorf("snapshot store: %s", err)
}
// Create raft log.
//
// The mutex must be unlocked as initializing the raft store may
// call back into methods which acquire the lock (e.g. Restore)
s.mu.Unlock()
r, err := raft.NewRaft(config, s, cacheStore, stableStore, ss, s.peerStore, s.transport)
s.mu.Lock()
if err != nil {
return fmt.Errorf("raft: %s", err)
}
// make sure the store was not closed whilst the mutex was unlocked
select {
case <-s.closing:
return ErrShutdown
default:
}
s.raft = r
// Start goroutine to monitor leadership changes.
s.wg.Add(1)
go s.monitorLeaderCh()
// Start goroutine to check for instance expiry.
s.wg.Add(1)
go s.expirer()
return nil
}
示例13: setupRaft
// setupRaft is used to setup and initialize Raft: it builds the FSM, the
// MDB-backed log/stable store, the file snapshot store, network transport
// and JSON peer store, then starts the leadership monitor goroutine. On any
// failure after the MDB store is opened, the store is closed before the
// error is returned.
func (s *Server) setupRaft() error {
// If we are in bootstrap mode, enable a single node cluster
if s.config.Bootstrap {
s.config.RaftConfig.EnableSingleNode = true
}
// Create the base path
path := filepath.Join(s.config.DataDir, raftState)
if err := ensurePath(path, true); err != nil {
return err
}
// Create the FSM
var err error
s.fsm, err = NewFSM(s.config.LogOutput)
if err != nil {
return err
}
// Create the MDB store for logs and stable storage
store, err := raftmdb.NewMDBStoreWithSize(path, raftDBSize)
if err != nil {
return err
}
s.raftStore = store
// Create the snapshot store; file snapshots let raft truncate its log.
snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
if err != nil {
store.Close()
return err
}
// Create a transport layer
trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
s.raftTransport = trans
// Setup the peer store
s.raftPeers = raft.NewJSONPeers(path, trans)
// Ensure local host is always included if we are in bootstrap mode
if s.config.Bootstrap {
peers, err := s.raftPeers.Peers()
if err != nil {
store.Close()
return err
}
if !raft.PeerContained(peers, trans.LocalAddr()) {
s.raftPeers.SetPeers(raft.AddUniquePeer(peers, trans.LocalAddr()))
}
}
// Make sure we set the LogOutput
s.config.RaftConfig.LogOutput = s.config.LogOutput
// Setup the Raft store; the MDB store serves as both log and stable store.
s.raft, err = raft.NewRaft(s.config.RaftConfig, s.fsm, store, store,
snapshots, s.raftPeers, trans)
if err != nil {
store.Close()
trans.Close()
return err
}
// Start monitoring leadership
go s.monitorLeadership()
return nil
}
示例14: setupRaft
// setupRaft is used to setup and initialize Raft
func (s *Server) setupRaft() error {
// If we have an unclean exit then attempt to close the Raft store.
defer func() {
if s.raft == nil && s.raftStore != nil {
if err := s.raftStore.Close(); err != nil {
s.logger.Printf("[ERR] consul: failed to close Raft store: %v", err)
}
}
}()
// Create the FSM.
var err error
s.fsm, err = NewFSM(s.tombstoneGC, s.config.LogOutput)
if err != nil {
return err
}
// Create a transport layer.
trans := raft.NewNetworkTransport(s.raftLayer, 3, 10*time.Second, s.config.LogOutput)
s.raftTransport = trans
// Make sure we set the LogOutput.
s.config.RaftConfig.LogOutput = s.config.LogOutput
// Our version of Raft protocol requires the LocalID to match the network
// address of the transport.
s.config.RaftConfig.LocalID = raft.ServerID(trans.LocalAddr())
// Build an all in-memory setup for dev mode, otherwise prepare a full
// disk-based setup.
var log raft.LogStore
var stable raft.StableStore
var snap raft.SnapshotStore
if s.config.DevMode {
store := raft.NewInmemStore()
s.raftInmem = store
stable = store
log = store
snap = raft.NewInmemSnapshotStore()
} else {
// Create the base raft path.
path := filepath.Join(s.config.DataDir, raftState)
if err := ensurePath(path, true); err != nil {
return err
}
// Create the backend raft store for logs and stable storage.
store, err := raftboltdb.NewBoltStore(filepath.Join(path, "raft.db"))
if err != nil {
return err
}
s.raftStore = store
stable = store
// Wrap the store in a LogCache to improve performance.
cacheStore, err := raft.NewLogCache(raftLogCacheSize, store)
if err != nil {
return err
}
log = cacheStore
// Create the snapshot store.
snapshots, err := raft.NewFileSnapshotStore(path, snapshotsRetained, s.config.LogOutput)
if err != nil {
return err
}
snap = snapshots
// For an existing cluster being upgraded to the new version of
// Raft, we almost never want to run recovery based on the old
// peers.json file. We create a peers.info file with a helpful
// note about where peers.json went, and use that as a sentinel
// to avoid ingesting the old one that first time (if we have to
// create the peers.info file because it's not there, we also
// blow away any existing peers.json file).
peersFile := filepath.Join(path, "peers.json")
peersInfoFile := filepath.Join(path, "peers.info")
if _, err := os.Stat(peersInfoFile); os.IsNotExist(err) {
if err := ioutil.WriteFile(peersInfoFile, []byte(peersInfoContent), 0755); err != nil {
return fmt.Errorf("failed to write peers.info file: %v", err)
}
// Blow away the peers.json file if present, since the
// peers.info sentinel wasn't there.
if _, err := os.Stat(peersFile); err == nil {
if err := os.Remove(peersFile); err != nil {
return fmt.Errorf("failed to delete peers.json, please delete manually (see peers.info for details): %v", err)
}
s.logger.Printf("[INFO] consul: deleted peers.json file (see peers.info for details)")
}
} else if _, err := os.Stat(peersFile); err == nil {
s.logger.Printf("[INFO] consul: found peers.json file, recovering Raft configuration...")
configuration, err := raft.ReadPeersJSON(peersFile)
if err != nil {
return fmt.Errorf("recovery failed to parse peers.json: %v", err)
}
tmpFsm, err := NewFSM(s.tombstoneGC, s.config.LogOutput)
if err != nil {
return fmt.Errorf("recovery failed to make temp FSM: %v", err)
//.........这里部分代码省略.........
示例15: open
// open initializes the local raft node: configuration, network transport,
// JSON peer store, bolt-backed log/stable store and file snapshot store.
// For single-node clusters it rewrites the peer list to the current address;
// for multi-node clusters it refuses to start if our address is not already
// an agreed-upon peer. Starts the leader-change logger goroutine on success.
func (r *localRaft) open() error {
r.closing = make(chan struct{})
s := r.store
// Setup raft configuration.
config := raft.DefaultConfig()
config.LogOutput = ioutil.Discard
if s.clusterTracingEnabled {
config.Logger = s.Logger
}
config.HeartbeatTimeout = s.HeartbeatTimeout
config.ElectionTimeout = s.ElectionTimeout
config.LeaderLeaseTimeout = s.LeaderLeaseTimeout
config.CommitTimeout = s.CommitTimeout
// Since we actually never call `removePeer` this is safe.
// If in the future we decide to call remove peer we have to re-evaluate how to handle this
config.ShutdownOnRemove = false
// If no peers are set in the config or there is one and we are it, then start as a single server.
if len(s.peers) <= 1 {
config.EnableSingleNode = true
// Ensure we can always become the leader
config.DisableBootstrapAfterElect = false
}
// Build raft layer to multiplex listener.
r.raftLayer = newRaftLayer(s.RaftListener, s.RemoteAddr)
// Create a transport layer
r.transport = raft.NewNetworkTransport(r.raftLayer, 3, 10*time.Second, config.LogOutput)
// Create peer storage.
r.peerStore = raft.NewJSONPeers(s.path, r.transport)
peers, err := r.peerStore.Peers()
if err != nil {
return err
}
// For single-node clusters, we can update the raft peers before we start the cluster if the hostname
// has changed.
if config.EnableSingleNode {
if err := r.peerStore.SetPeers([]string{s.RemoteAddr.String()}); err != nil {
return err
}
peers = []string{s.RemoteAddr.String()}
}
// If we have multiple nodes in the cluster, make sure our address is in the raft peers or
// we won't be able to boot into the cluster because the other peers will reject our new hostname. This
// is difficult to resolve automatically because we need to have all the raft peers agree on the current members
// of the cluster before we can change them.
if len(peers) > 0 && !raft.PeerContained(peers, s.RemoteAddr.String()) {
s.Logger.Printf("%s is not in the list of raft peers. Please update %v/peers.json on all raft nodes to have the same contents.", s.RemoteAddr.String(), s.Path())
return fmt.Errorf("peers out of sync: %v not in %v", s.RemoteAddr.String(), peers)
}
// Create the log store and stable store (one BoltDB file serves as both).
store, err := raftboltdb.NewBoltStore(filepath.Join(s.path, "raft.db"))
if err != nil {
return fmt.Errorf("new bolt store: %s", err)
}
r.raftStore = store
// Create the snapshot store; file snapshots let raft truncate its log.
snapshots, err := raft.NewFileSnapshotStore(s.path, raftSnapshotsRetained, os.Stderr)
if err != nil {
return fmt.Errorf("file snapshot store: %s", err)
}
// Create raft log with the store acting as the FSM.
ra, err := raft.NewRaft(config, (*storeFSM)(s), store, store, snapshots, r.peerStore, r.transport)
if err != nil {
return fmt.Errorf("new raft: %s", err)
}
r.raft = ra
r.wg.Add(1)
go r.logLeaderChanges()
return nil
}