This article collects typical usage examples of the Golang New function from github.com/coreos/etcd/snap. If you are wondering what New does, how to call it, or what idiomatic usage looks like, the curated code examples below should help.
The article presents 15 code examples of the New function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Golang code examples.
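Before the examples, here is a minimal sketch of the pattern most of them share: snap.New binds a Snapshotter to a snapshot directory, Load returns the most recent snapshot (snap.ErrNoSnapshot simply means none exists yet), and SaveSnap persists a snapshot into another directory. The directory paths below are placeholders chosen for illustration, not part of any real deployment, and both directories are assumed to already exist.

package main

import (
	"log"

	"github.com/coreos/etcd/snap"
)

func main() {
	// Bind a Snapshotter to an existing snapshot directory (path is illustrative).
	ss := snap.New("/var/lib/etcd-example/member/snap")

	// Load the most recent snapshot; ErrNoSnapshot just means none has been written yet.
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}

	if snapshot != nil {
		// Re-save the loaded snapshot into another (pre-existing) directory,
		// e.g. as part of a backup.
		backup := snap.New("/var/lib/etcd-example/backup/snap")
		if err := backup.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}
}

Most of the examples below combine this pattern with WAL replay (wal.OpenForRead or wal.OpenNotInUse followed by ReadAll) to rebuild server state.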
Example 1: handleBackup
// handleBackup handles a request that intends to do a backup.
func handleBackup(c *cli.Context) {
	srcSnap := path.Join(c.String("data-dir"), "member", "snap")
	destSnap := path.Join(c.String("backup-dir"), "member", "snap")
	srcWAL := path.Join(c.String("data-dir"), "member", "wal")
	destWAL := path.Join(c.String("backup-dir"), "member", "wal")
	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		newss := snap.New(destSnap)
		if err = newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}
	w, err := wal.OpenForRead(srcWAL, walsnap)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	switch err {
	case nil:
	case wal.ErrSnapshotNotFound:
		fmt.Printf("Failed to find the match snapshot record %+v in wal %v.", walsnap, srcWAL)
		fmt.Printf("etcdctl will add it back. Start auto fixing...")
	default:
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	idgen := idutil.NewGenerator(0, time.Now())
	metadata.NodeID = idgen.Next()
	metadata.ClusterID = idgen.Next()
	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
	if err := neww.SaveSnapshot(walsnap); err != nil {
		log.Fatal(err)
	}
}
Example 2: New
// NewSnapshotter returns a new Snapshotter with the given encrypters and decrypters
func (sc snapCryptor) New(dirpath string) Snapshotter {
	return &wrappedSnap{
		Snapshotter: snap.New(dirpath),
		encrypter:   sc.encrypter,
		decrypter:   sc.decrypter,
	}
}
Example 3: handleBackup
// handleBackup handles a request that intends to do a backup.
func handleBackup(c *cli.Context) {
	srcSnap := path.Join(c.String("data-dir"), "snap")
	destSnap := path.Join(c.String("backup-dir"), "snap")
	srcWAL := path.Join(c.String("data-dir"), "wal")
	destWAL := path.Join(c.String("backup-dir"), "wal")
	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var index uint64
	if snapshot != nil {
		index = snapshot.Metadata.Index
		newss := snap.New(destSnap)
		if err := newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}
	w, err := wal.OpenNotInUse(srcWAL, index)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	rand.Seed(time.Now().UnixNano())
	metadata.NodeID = etcdserver.GenID()
	metadata.ClusterID = etcdserver.GenID()
	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
}
Example 4: handleBackup
// handleBackup handles a request that intends to do a backup.
func handleBackup(c *cli.Context) {
	srcSnap := path.Join(c.String("data-dir"), "snap")
	destSnap := path.Join(c.String("backup-dir"), "snap")
	srcWAL := path.Join(c.String("data-dir"), "wal")
	destWAL := path.Join(c.String("backup-dir"), "wal")
	if err := os.MkdirAll(destSnap, 0700); err != nil {
		log.Fatalf("failed creating backup snapshot dir %v: %v", destSnap, err)
	}
	ss := snap.New(srcSnap)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		log.Fatal(err)
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
		newss := snap.New(destSnap)
		if err := newss.SaveSnap(*snapshot); err != nil {
			log.Fatal(err)
		}
	}
	w, err := wal.OpenNotInUse(srcWAL, walsnap)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	wmetadata, state, ents, err := w.ReadAll()
	if err != nil {
		log.Fatal(err)
	}
	var metadata etcdserverpb.Metadata
	pbutil.MustUnmarshal(&metadata, wmetadata)
	idgen := idutil.NewGenerator(0, time.Now())
	metadata.NodeID = idgen.Next()
	metadata.ClusterID = idgen.Next()
	neww, err := wal.Create(destWAL, pbutil.MustMarshal(&metadata))
	if err != nil {
		log.Fatal(err)
	}
	defer neww.Close()
	if err := neww.Save(state, ents); err != nil {
		log.Fatal(err)
	}
}
Example 5: rebuildStoreV2
func rebuildStoreV2() store.Store {
	waldir := migrateWALdir
	if len(waldir) == 0 {
		waldir = path.Join(migrateDatadir, "member", "wal")
	}
	snapdir := path.Join(migrateDatadir, "member", "snap")
	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		ExitWithError(ExitError, err)
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	defer w.Close()
	_, _, ents, err := w.ReadAll()
	if err != nil {
		ExitWithError(ExitError, err)
	}
	st := store.New()
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			ExitWithError(ExitError, err)
		}
	}
	applier := etcdserver.NewApplierV2(st, nil)
	for _, ent := range ents {
		if ent.Type != raftpb.EntryNormal {
			continue
		}
		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
	}
	return st
}
Example 6: startRaft
func (rc *raftNode) startRaft() {
	if !fileutil.Exist(rc.snapdir) {
		if err := os.Mkdir(rc.snapdir, 0750); err != nil {
			log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
		}
	}
	rc.snapshotter = snap.New(rc.snapdir)
	rc.snapshotterReady <- rc.snapshotter
	oldwal := wal.Exist(rc.waldir)
	rc.wal = rc.replayWAL()
	rpeers := make([]raft.Peer, len(rc.peers))
	for i := range rpeers {
		rpeers[i] = raft.Peer{ID: uint64(i + 1)}
	}
	c := &raft.Config{
		ID:              uint64(rc.id),
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         rc.raftStorage,
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
	if oldwal {
		rc.node = raft.RestartNode(c)
	} else {
		startPeers := rpeers
		if rc.join {
			startPeers = nil
		}
		rc.node = raft.StartNode(c, startPeers)
	}
	ss := &stats.ServerStats{}
	ss.Initialize()
	rc.transport = &rafthttp.Transport{
		ID:          types.ID(rc.id),
		ClusterID:   0x1000,
		Raft:        rc,
		ServerStats: ss,
		LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
		ErrorC:      make(chan error),
	}
	rc.transport.Start()
	for i := range rc.peers {
		if i+1 != rc.id {
			rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
		}
	}
	go rc.serveRaft()
	go rc.serveChannels()
}
Example 7: loadAndStart
func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error {
	walDir := n.walDir()
	snapDir := n.snapDir()
	if !fileutil.Exist(snapDir) {
		// If snapshots created by the etcd-v2 code exist, hard link
		// them at the new path. This prevents etcd-v2 creating
		// snapshots that are visible to us, but out of sync with our
		// WALs, after a downgrade.
		legacySnapDir := n.legacySnapDir()
		if fileutil.Exist(legacySnapDir) {
			if err := migrateSnapshots(legacySnapDir, snapDir); err != nil {
				return err
			}
		} else if err := os.MkdirAll(snapDir, 0700); err != nil {
			return errors.Wrap(err, "failed to create snapshot directory")
		}
	}
	// Create a snapshotter
	n.snapshotter = snap.New(snapDir)
	if !wal.Exist(walDir) {
		// If wals created by the etcd-v2 wal code exist, copy them to
		// the new path to avoid adding backwards-incompatible entries
		// to those files.
		legacyWALDir := n.legacyWALDir()
		if !wal.Exist(legacyWALDir) {
			return errNoWAL
		}
		if err := migrateWALs(legacyWALDir, walDir); err != nil {
			return err
		}
	}
	// Load snapshot data
	snapshot, err := n.snapshotter.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return err
	}
	if snapshot != nil {
		// Load the snapshot data into the store
		if err := n.restoreFromSnapshot(snapshot.Data, forceNewCluster); err != nil {
			return err
		}
	}
	// Read logs to fully catch up store
	if err := n.readWAL(ctx, snapshot, forceNewCluster); err != nil {
		return err
	}
	return nil
}
Example 8: loadAndStart
func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error {
	walDir := n.walDir()
	snapDir := n.snapDir()
	if err := os.MkdirAll(snapDir, 0700); err != nil {
		return fmt.Errorf("create snapshot directory error: %v", err)
	}
	// Create a snapshotter
	n.snapshotter = snap.New(snapDir)
	if !wal.Exist(walDir) {
		raftNode := &api.RaftMember{
			RaftID: n.Config.ID,
			Addr:   n.Address,
		}
		metadata, err := raftNode.Marshal()
		if err != nil {
			return fmt.Errorf("error marshalling raft node: %v", err)
		}
		n.wal, err = wal.Create(walDir, metadata)
		if err != nil {
			return fmt.Errorf("create wal error: %v", err)
		}
		n.cluster.AddMember(&membership.Member{RaftMember: raftNode})
		n.startNodePeers = []raft.Peer{{ID: n.Config.ID, Context: metadata}}
		return nil
	}
	// Load snapshot data
	snapshot, err := n.snapshotter.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return err
	}
	if snapshot != nil {
		// Load the snapshot data into the store
		if err := n.restoreFromSnapshot(snapshot.Data, forceNewCluster); err != nil {
			return err
		}
	}
	// Read logs to fully catch up store
	if err := n.readWAL(ctx, snapshot, forceNewCluster); err != nil {
		return err
	}
	n.Node = raft.RestartNode(n.Config)
	return nil
}
Example 9: testSnapshotSend
func testSnapshotSend(t *testing.T, sm *snap.Message) (bool, []os.FileInfo) {
	d, err := ioutil.TempDir(os.TempDir(), "snapdir")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(d)
	r := &fakeRaft{}
	tr := &Transport{pipelineRt: &http.Transport{}, ClusterID: types.ID(1), Raft: r}
	ch := make(chan struct{}, 1)
	h := &syncHandler{newSnapshotHandler(tr, r, snap.New(d), types.ID(1)), ch}
	srv := httptest.NewServer(h)
	defer srv.Close()
	picker := mustNewURLPicker(t, []string{srv.URL})
	snapsend := newSnapshotSender(tr, picker, types.ID(1), newPeerStatus(types.ID(1)))
	defer snapsend.stop()
	snapsend.send(*sm)
	sent := false
	select {
	case <-time.After(time.Second):
		t.Fatalf("timed out sending snapshot")
	case sent = <-sm.CloseNotify():
	}
	// wait for handler to finish accepting snapshot
	<-ch
	files, rerr := ioutil.ReadDir(d)
	if rerr != nil {
		t.Fatal(rerr)
	}
	return sent, files
}
Example 10: NewServer
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New()
	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID
	walVersion, err := wal.DetectVersion(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if walVersion == wal.WALUnknown {
		return nil, fmt.Errorf("unknown wal version in data dir %s", cfg.DataDir)
	}
	haveWAL := walVersion != wal.WALNotExist
	if haveWAL && walVersion != wal.WALv0_5 {
		err := UpgradeWAL(cfg, walVersion)
		if err != nil {
			return nil, err
		}
	}
	ss := snap.New(cfg.SnapDir())
	switch {
	case !haveWAL && !cfg.NewCluster:
		us := getOtherPeerURLs(cfg.Cluster, cfg.Name)
		existingCluster, err := GetClusterFromPeers(us)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cfg.Cluster, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		cfg.Cluster.SetID(existingCluster.id)
		cfg.Cluster.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrapConfig(); err != nil {
			return nil, err
		}
		if err := checkClientURLsEmptyFromPeers(cfg.Cluster, cfg.Name); err != nil {
			return nil, err
		}
		m := cfg.Cluster.MemberByName(cfg.Name)
		if cfg.ShouldDiscover() {
			s, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.Cluster.String())
			if err != nil {
				return nil, err
			}
			if cfg.Cluster, err = NewClusterFromString(cfg.Cluster.token, s); err != nil {
				return nil, err
			}
		}
		cfg.Cluster.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cfg.Cluster.MemberIDs())
	case haveWAL:
		if cfg.ShouldDiscover() {
			log.Printf("etcdserver: discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		var index uint64
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				log.Panicf("etcdserver: recovered store from snapshot error: %v", err)
			}
			log.Printf("etcdserver: recovered store from snapshot at index %d", snapshot.Metadata.Index)
			index = snapshot.Metadata.Index
		}
		cfg.Cluster = NewClusterFromStore(cfg.Cluster.token, st)
		cfg.Print()
		if snapshot != nil {
			log.Printf("etcdserver: loaded cluster information from store: %s", cfg.Cluster)
		}
		if !cfg.ForceNewCluster {
			id, n, s, w = restartNode(cfg, index+1, snapshot)
		} else {
			id, n, s, w = restartAsStandaloneNode(cfg, index+1, snapshot)
		}
	default:
		return nil, fmt.Errorf("unsupported bootstrap config")
	}
	sstats := &stats.ServerStats{
		Name: cfg.Name,
		ID:   id.String(),
	}
	lstats := stats.NewLeaderStats(id.String())
	srv := &EtcdServer{
		cfg:         cfg,
		store:       st,
		node:        n,
		raftStorage: s,
	// ... (the rest of this example is omitted) ...
Example 11: main
func main() {
	from := flag.String("data-dir", "", "")
	snapfile := flag.String("start-snap", "", "The base name of snapshot file to start dumping")
	index := flag.Uint64("start-index", 0, "The index to start dumping")
	flag.Parse()
	if *from == "" {
		log.Fatal("Must provide -data-dir flag.")
	}
	if *snapfile != "" && *index != 0 {
		log.Fatal("start-snap and start-index flags cannot be used together.")
	}
	var (
		walsnap  walpb.Snapshot
		snapshot *raftpb.Snapshot
		err      error
	)
	isIndex := *index != 0
	if isIndex {
		fmt.Printf("Start dumping log entries from index %d.\n", *index)
		walsnap.Index = *index
	} else {
		if *snapfile == "" {
			ss := snap.New(snapDir(*from))
			snapshot, err = ss.Load()
		} else {
			snapshot, err = snap.Read(path.Join(snapDir(*from), *snapfile))
		}
		switch err {
		case nil:
			walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
			nodes := genIDSlice(snapshot.Metadata.ConfState.Nodes)
			fmt.Printf("Snapshot:\nterm=%d index=%d nodes=%s\n",
				walsnap.Term, walsnap.Index, nodes)
		case snap.ErrNoSnapshot:
			fmt.Printf("Snapshot:\nempty\n")
		default:
			log.Fatalf("Failed loading snapshot: %v", err)
		}
		fmt.Println("Start dumping log entries from snapshot.")
	}
	w, err := wal.OpenForRead(walDir(*from), walsnap)
	if err != nil {
		log.Fatalf("Failed opening WAL: %v", err)
	}
	wmetadata, state, ents, err := w.ReadAll()
	w.Close()
	if err != nil && (!isIndex || err != wal.ErrSnapshotNotFound) {
		log.Fatalf("Failed reading WAL: %v", err)
	}
	id, cid := parseWALMetadata(wmetadata)
	vid := types.ID(state.Vote)
	fmt.Printf("WAL metadata:\nnodeID=%s clusterID=%s term=%d commitIndex=%d vote=%s\n",
		id, cid, state.Term, state.Commit, vid)
	fmt.Printf("WAL entries:\n")
	fmt.Printf("lastIndex=%d\n", ents[len(ents)-1].Index)
	fmt.Printf("%4s\t%10s\ttype\tdata\n", "term", "index")
	for _, e := range ents {
		msg := fmt.Sprintf("%4d\t%10d", e.Term, e.Index)
		switch e.Type {
		case raftpb.EntryNormal:
			msg = fmt.Sprintf("%s\tnorm", msg)
			var rr etcdserverpb.InternalRaftRequest
			if err := rr.Unmarshal(e.Data); err == nil {
				msg = fmt.Sprintf("%s\t%s", msg, rr.String())
				break
			}
			var r etcdserverpb.Request
			if err := r.Unmarshal(e.Data); err == nil {
				switch r.Method {
				case "":
					msg = fmt.Sprintf("%s\tnoop", msg)
				case "SYNC":
					msg = fmt.Sprintf("%s\tmethod=SYNC time=%q", msg, time.Unix(0, r.Time))
				case "QGET", "DELETE":
					msg = fmt.Sprintf("%s\tmethod=%s path=%s", msg, r.Method, excerpt(r.Path, 64, 64))
				default:
					msg = fmt.Sprintf("%s\tmethod=%s path=%s val=%s", msg, r.Method, excerpt(r.Path, 64, 64), excerpt(r.Val, 128, 0))
				}
				break
			}
			msg = fmt.Sprintf("%s\t???", msg)
		case raftpb.EntryConfChange:
			msg = fmt.Sprintf("%s\tconf", msg)
			var r raftpb.ConfChange
			if err := r.Unmarshal(e.Data); err != nil {
				msg = fmt.Sprintf("%s\t???", msg)
			} else {
				msg = fmt.Sprintf("%s\tmethod=%s id=%s", msg, r.Type, types.ID(r.NodeID))
			}
		}
		fmt.Println(msg)
	}
	// ... (the rest of this example is omitted) ...
Example 12: rebuild
func rebuild(datadir string) ([]byte, *raftpb.HardState, store.Store, error) {
	waldir := path.Join(datadir, "member", "wal")
	snapdir := path.Join(datadir, "member", "snap")
	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		return nil, nil, nil, err
	}
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		return nil, nil, nil, err
	}
	defer w.Close()
	meta, hardstate, ents, err := w.ReadAll()
	if err != nil {
		return nil, nil, nil, err
	}
	st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			return nil, nil, nil, err
		}
	}
	cluster := membership.NewCluster("")
	cluster.SetStore(st)
	cluster.Recover(func(*semver.Version) {})
	applier := etcdserver.NewApplierV2(st, cluster)
	for _, ent := range ents {
		if ent.Type == raftpb.EntryConfChange {
			var cc raftpb.ConfChange
			pbutil.MustUnmarshal(&cc, ent.Data)
			switch cc.Type {
			case raftpb.ConfChangeAddNode:
				m := new(membership.Member)
				if err := json.Unmarshal(cc.Context, m); err != nil {
					return nil, nil, nil, err
				}
				cluster.AddMember(m)
			case raftpb.ConfChangeRemoveNode:
				id := types.ID(cc.NodeID)
				cluster.RemoveMember(id)
			case raftpb.ConfChangeUpdateNode:
				m := new(membership.Member)
				if err := json.Unmarshal(cc.Context, m); err != nil {
					return nil, nil, nil, err
				}
				cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
			}
			continue
		}
		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
	}
	return meta, &hardstate, st, nil
}
Example 13: main
func main() {
	flag.Parse()
	if len(*migrateDatadir) == 0 {
		glog.Fatal("need to set '--data-dir'")
	}
	dbpath := path.Join(*migrateDatadir, "member", "snap", "db")
	// etcd3 store backend. We will use it to parse v3 data files and extract information.
	be := backend.NewDefaultBackend(dbpath)
	tx := be.BatchTx()
	// etcd2 store backend. We will use v3 data to update this and then save snapshot to disk.
	st := store.New(etcdserver.StoreClusterPrefix, etcdserver.StoreKeysPrefix)
	expireTime := time.Now().Add(*ttl)
	tx.Lock()
	err := tx.UnsafeForEach([]byte("key"), func(k, v []byte) error {
		kv := &mvccpb.KeyValue{}
		kv.Unmarshal(v)
		// This is compact key.
		if !strings.HasPrefix(string(kv.Key), "/") {
			return nil
		}
		ttlOpt := store.TTLOptionSet{}
		if kv.Lease != 0 {
			ttlOpt = store.TTLOptionSet{ExpireTime: expireTime}
		}
		if !isTombstone(k) {
			sk := path.Join(strings.Trim(etcdserver.StoreKeysPrefix, "/"), string(kv.Key))
			_, err := st.Set(sk, false, string(kv.Value), ttlOpt)
			if err != nil {
				return err
			}
		} else {
			st.Delete(string(kv.Key), false, false)
		}
		return nil
	})
	if err != nil {
		glog.Fatal(err)
	}
	tx.Unlock()
	if err := traverseAndDeleteEmptyDir(st, "/"); err != nil {
		glog.Fatal(err)
	}
	// rebuild cluster state.
	metadata, hardstate, oldSt, err := rebuild(*migrateDatadir)
	if err != nil {
		glog.Fatal(err)
	}
	// In the following, it's low level logic that saves metadata and data into v2 snapshot.
	backupPath := *migrateDatadir + ".rollback.backup"
	if err := os.Rename(*migrateDatadir, backupPath); err != nil {
		glog.Fatal(err)
	}
	if err := os.MkdirAll(path.Join(*migrateDatadir, "member", "snap"), 0700); err != nil {
		glog.Fatal(err)
	}
	walDir := path.Join(*migrateDatadir, "member", "wal")
	w, err := wal.Create(walDir, metadata)
	if err != nil {
		glog.Fatal(err)
	}
	err = w.SaveSnapshot(walpb.Snapshot{Index: hardstate.Commit, Term: hardstate.Term})
	if err != nil {
		glog.Fatal(err)
	}
	w.Close()
	event, err := oldSt.Get(etcdserver.StoreClusterPrefix, true, false)
	if err != nil {
		glog.Fatal(err)
	}
	// nodes (members info) for ConfState
	nodes := []uint64{}
	traverseMetadata(event.Node, func(n *store.NodeExtern) {
		if n.Key != etcdserver.StoreClusterPrefix {
			// update store metadata
			v := ""
			if !n.Dir {
				v = *n.Value
			}
			if n.Key == path.Join(etcdserver.StoreClusterPrefix, "version") {
				v = rollbackVersion
			}
			if _, err := st.Set(n.Key, n.Dir, v, store.TTLOptionSet{}); err != nil {
				glog.Fatal(err)
			}
			// update nodes
			fields := strings.Split(n.Key, "/")
			if len(fields) == 4 && fields[2] == "members" {
				// ... (the rest of this example is omitted) ...
Example 14: SendSnapshot
func (s *snapTransporter) SendSnapshot(m snap.Message) {
	ss := snap.New(s.snapDir)
	ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1)
	m.CloseWithError(nil)
	s.snapDoneC <- m
}
Example 15: NewServer
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg *ServerConfig) (*EtcdServer, error) {
	st := store.New(StoreClusterPrefix, StoreKeysPrefix)
	var w *wal.WAL
	var n raft.Node
	var s *raft.MemoryStorage
	var id types.ID
	var cl *cluster
	// Run the migrations.
	dataVer, err := version.DetectDataDir(cfg.DataDir)
	if err != nil {
		return nil, err
	}
	if err := upgradeDataDir(cfg.DataDir, cfg.Name, dataVer); err != nil {
		return nil, err
	}
	haveWAL := wal.Exist(cfg.WALDir())
	ss := snap.New(cfg.SnapDir())
	var remotes []*Member
	switch {
	case !haveWAL && !cfg.NewCluster:
		if err := cfg.VerifyJoinExisting(); err != nil {
			return nil, err
		}
		cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		existingCluster, err := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), cfg.Transport)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", err)
		}
		if err := ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
			return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
		}
		if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, cfg.Transport) {
			return nil, fmt.Errorf("incompatible with current running cluster")
		}
		remotes = existingCluster.Members()
		cl.SetID(existingCluster.id)
		cl.SetStore(st)
		cfg.Print()
		id, n, s, w = startNode(cfg, cl, nil)
	case !haveWAL && cfg.NewCluster:
		if err := cfg.VerifyBootstrap(); err != nil {
			return nil, err
		}
		cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
		if err != nil {
			return nil, err
		}
		m := cl.MemberByName(cfg.Name)
		if isMemberBootstrapped(cl, cfg.Name, cfg.Transport) {
			return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
		}
		if cfg.ShouldDiscover() {
			str, err := discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
			if err != nil {
				return nil, err
			}
			urlsmap, err := types.NewURLsMap(str)
			if err != nil {
				return nil, err
			}
			if checkDuplicateURL(urlsmap) {
				return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
			}
			if cl, err = newClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
				return nil, err
			}
		}
		cl.SetStore(st)
		cfg.PrintWithInitial()
		id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
	case haveWAL:
		if err := fileutil.IsDirWriteable(cfg.DataDir); err != nil {
			return nil, fmt.Errorf("cannot write to data directory: %v", err)
		}
		if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
			return nil, fmt.Errorf("cannot write to member directory: %v", err)
		}
		if cfg.ShouldDiscover() {
			plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
		}
		snapshot, err := ss.Load()
		if err != nil && err != snap.ErrNoSnapshot {
			return nil, err
		}
		if snapshot != nil {
			if err := st.Recovery(snapshot.Data); err != nil {
				plog.Panicf("recovered store from snapshot error: %v", err)
			}
			plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
		// ... (the rest of this example is omitted) ...