This article collects typical usage examples of the Golang function store.New from github.com/coreos/etcd/store. If you have been asking yourself how New works, how to call it, or what it looks like in real code, the hand-picked examples here should help.
The sections below present 15 code examples of the New function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
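Before diving into the examples, here is a minimal orientation sketch of the pattern they all share: store.New constructs an empty in-memory v2 key-value store, which is then either handed to a cluster or server (e.g. cl.SetStore(store.New())) or read and written directly. The sketch below is illustrative and not taken from etcd itself; the Get(path, recursive, sorted) call mirrors its use in Example 11, while other store.Store methods (Set, Create, Delete, ...) are omitted because their signatures vary across etcd versions.

package main

import (
	"fmt"

	"github.com/coreos/etcd/store"
)

func main() {
	// Build an empty in-memory key-value store.
	st := store.New()

	// Read the root directory recursively and sorted, as Example 11
	// (handleImportSnap) does after recovering from a snapshot.
	ev, err := st.Get("/", true, true)
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}

	// ev is a *store.Event; its Node field describes the subtree under "/".
	fmt.Printf("root node: %+v\n", ev.Node)
}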
Example 1: TestApplyRepeat
// TestApplyRepeat tests that server handles repeat raft messages gracefully
func TestApplyRepeat(t *testing.T) {
	n := newNodeConfChangeCommitterStream()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(store.New())
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		cfg:      &ServerConfig{},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.applyV2 = &applierV2store{s}
	s.start()
	req := &pb.Request{Method: "QGET", ID: uint64(1)}
	ents := []raftpb.Entry{{Index: 1, Data: pbutil.MustMarshal(req)}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// dup msg
	n.readyc <- raft.Ready{CommittedEntries: ents}

	// use a conf change to block until dup msgs are all processed
	cc := &raftpb.ConfChange{Type: raftpb.ConfChangeRemoveNode, NodeID: 2}
	ents = []raftpb.Entry{{
		Index: 2,
		Type:  raftpb.EntryConfChange,
		Data:  pbutil.MustMarshal(cc),
	}}
	n.readyc <- raft.Ready{CommittedEntries: ents}
	// wait for conf change message
	act, err := n.Wait(1)

	// wait for stop message (async to avoid deadlock)
	stopc := make(chan error)
	go func() {
		_, werr := n.Wait(1)
		stopc <- werr
	}()
	s.Stop()

	// only want to confirm etcdserver won't panic; no data to check
	if err != nil {
		t.Fatal(err)
	}
	if len(act) == 0 {
		t.Fatalf("expected len(act) > 0, got %d", len(act))
	}
	if err = <-stopc; err != nil {
		t.Fatalf("error on stop (%v)", err)
	}
}
Example 2: TestApplyMultiConfChangeShouldStop
// TestApplyMultiConfChangeShouldStop ensures that apply will return shouldStop
// if the local member is removed along with other conf updates.
func TestApplyMultiConfChangeShouldStop(t *testing.T) {
	cl := membership.NewCluster("")
	cl.SetStore(store.New())
	for i := 1; i <= 5; i++ {
		cl.AddMember(&membership.Member{ID: types.ID(i)})
	}
	srv := &EtcdServer{
		id: 2,
		r: raftNode{
			Node:      newNodeNop(),
			transport: rafthttp.NewNopTransporter(),
		},
		cluster: cl,
		w:       wait.New(),
	}
	// Propose removal of members 1-4; member 2 is the local member,
	// so applying these entries should report shouldStop.
	ents := []raftpb.Entry{}
	for i := 1; i <= 4; i++ {
		ent := raftpb.Entry{
			Term:  1,
			Index: uint64(i),
			Type:  raftpb.EntryConfChange,
			Data: pbutil.MustMarshal(
				&raftpb.ConfChange{
					Type:   raftpb.ConfChangeRemoveNode,
					NodeID: uint64(i)}),
		}
		ents = append(ents, ent)
	}
	_, shouldStop := srv.apply(ents, &raftpb.ConfState{})
	if !shouldStop {
		t.Errorf("shouldStop = %t, want %t", shouldStop, true)
	}
}
Example 3: runServer
func (e *Etcd) runServer() {
	var removeNotify <-chan bool
	for {
		if e.mode == PeerMode {
			log.Infof("%v starting in peer mode", e.Config.Name)
			// Starting the peer server should be followed closely by listening on its port.
			// If not, it may leave many requests unaccepted, or fail to receive heartbeats
			// from the cluster. One severe failure mode: if heartbeats are missed when a
			// second node joins a one-node cluster, the cluster can stop working for as
			// long as the two nodes cannot exchange messages.
			e.PeerServer.Start(e.Config.Snapshot, e.Config.ClusterConfig())
			removeNotify = e.PeerServer.RemoveNotify()
		} else {
			log.Infof("%v starting in standby mode", e.Config.Name)
			e.StandbyServer.Start()
			removeNotify = e.StandbyServer.RemoveNotify()
		}

		// etcd server is ready to accept connections, notify waiters.
		e.onceReady.Do(func() { close(e.readyNotify) })

		select {
		case <-e.closeChan:
			e.PeerServer.Stop()
			e.StandbyServer.Stop()
			return
		case <-removeNotify:
		}

		if e.mode == PeerMode {
			peerURLs := e.Registry.PeerURLs(e.PeerServer.RaftServer().Leader(), e.Config.Name)
			e.StandbyServer.SyncCluster(peerURLs)
			e.setMode(StandbyMode)
		} else {
			// Create etcd key-value store and registry.
			e.Store = store.New()
			e.Registry = server.NewRegistry(e.Store)
			e.PeerServer.SetStore(e.Store)
			e.PeerServer.SetRegistry(e.Registry)
			e.Server.SetStore(e.Store)
			e.Server.SetRegistry(e.Registry)

			// Generate new peer server here.
			// TODO(yichengq): raft server cannot be started after stopped.
			// It should be removed when raft restart is implemented.
			heartbeatInterval := time.Duration(e.Config.Peer.HeartbeatInterval) * time.Millisecond
			electionTimeout := time.Duration(e.Config.Peer.ElectionTimeout) * time.Millisecond
			raftServer, err := raft.NewServer(e.Config.Name, e.Config.DataDir, e.PeerServer.RaftServer().Transporter(), e.Store, e.PeerServer, "")
			if err != nil {
				log.Fatal(err)
			}
			raftServer.SetElectionTimeout(electionTimeout)
			raftServer.SetHeartbeatInterval(heartbeatInterval)
			e.PeerServer.SetRaftServer(raftServer, e.Config.Snapshot)
			e.StandbyServer.SetRaftServer(raftServer)
			e.PeerServer.SetJoinIndex(e.StandbyServer.JoinIndex())
			e.setMode(PeerMode)
		}
	}
}
Example 4: TestClusterFromStore
func TestClusterFromStore(t *testing.T) {
	tests := []struct {
		mems []Member
	}{
		{
			[]Member{newTestMember(1, nil, "node1", nil)},
		},
		{
			[]Member{},
		},
		{
			[]Member{
				newTestMember(1, nil, "node1", nil),
				newTestMember(2, nil, "node2", nil),
			},
		},
	}
	for i, tt := range tests {
		st := store.New()
		hc := newTestCluster(nil)
		hc.SetStore(st)
		for _, m := range tt.mems {
			hc.AddMember(&m)
		}
		c := NewClusterFromStore("abc", st)
		if c.name != "abc" {
			t.Errorf("#%d: name = %v, want %v", i, c.name, "abc")
		}
		wc := newTestCluster(tt.mems)
		if !reflect.DeepEqual(c.members, wc.members) {
			t.Errorf("#%d: members = %v, want %v", i, c.members, wc.members)
		}
	}
}
Example 5: TestClusterFromStore
func TestClusterFromStore(t *testing.T) {
	tests := []struct {
		mems []*Member
	}{
		{
			[]*Member{newTestMember(1, nil, "", nil)},
		},
		{
			nil,
		},
		{
			[]*Member{
				newTestMember(1, nil, "", nil),
				newTestMember(2, nil, "", nil),
			},
		},
	}
	for i, tt := range tests {
		hc := newTestCluster(nil)
		hc.SetStore(store.New())
		for _, m := range tt.mems {
			hc.AddMember(m)
		}
		c := NewClusterFromStore("abc", hc.store)
		if c.token != "abc" {
			t.Errorf("#%d: token = %v, want %v", i, c.token, "abc")
		}
		if !reflect.DeepEqual(c.Members(), tt.mems) {
			t.Errorf("#%d: members = %v, want %v", i, c.Members(), tt.mems)
		}
	}
}
Example 6: RunServer
// Starts a server in a temporary directory.
func RunServer(f func(*server.Server)) {
	path, _ := ioutil.TempDir("", "etcd-")
	defer os.RemoveAll(path)

	store := store.New()
	registry := server.NewRegistry(store)
	ps := server.NewPeerServer(testName, path, testRaftURL, testRaftURL, &server.TLSConfig{Scheme: "http"}, &server.TLSInfo{}, registry, store, testSnapCount)
	s := server.New(testName, testClientURL, testClientURL, &server.TLSConfig{Scheme: "http"}, &server.TLSInfo{}, ps, registry, store)
	ps.SetServer(s)

	// Start up peer server.
	c := make(chan bool)
	go func() {
		c <- true
		ps.ListenAndServe(false, []string{})
	}()
	<-c

	// Start up etcd server.
	go func() {
		c <- true
		s.ListenAndServe()
	}()
	<-c

	// Wait to make sure servers have started.
	time.Sleep(50 * time.Millisecond)

	// Execute the function passed in.
	f(s)

	// Clean up servers.
	ps.Close()
	s.Close()
}
Example 7: rebuildStoreV2
func rebuildStoreV2() store.Store {
	waldir := migrateWALdir
	if len(waldir) == 0 {
		waldir = path.Join(migrateDatadir, "member", "wal")
	}
	snapdir := path.Join(migrateDatadir, "member", "snap")

	ss := snap.New(snapdir)
	snapshot, err := ss.Load()
	if err != nil && err != snap.ErrNoSnapshot {
		ExitWithError(ExitError, err)
	}

	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, err := wal.OpenForRead(waldir, walsnap)
	if err != nil {
		ExitWithError(ExitError, err)
	}
	defer w.Close()

	_, _, ents, err := w.ReadAll()
	if err != nil {
		ExitWithError(ExitError, err)
	}

	st := store.New()
	if snapshot != nil {
		err := st.Recovery(snapshot.Data)
		if err != nil {
			ExitWithError(ExitError, err)
		}
	}

	applier := etcdserver.NewApplierV2(st, nil)
	for _, ent := range ents {
		if ent.Type != raftpb.EntryNormal {
			continue
		}

		var raftReq pb.InternalRaftRequest
		if !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { // backward compatible
			var r pb.Request
			pbutil.MustUnmarshal(&r, ent.Data)
			applyRequest(&r, applier)
		} else {
			if raftReq.V2 != nil {
				req := raftReq.V2
				applyRequest(req, applier)
			}
		}
	}

	return st
}
Example 8: startEtcd
// startEtcd launches the etcd server and HTTP handlers for client/server communication.
func startEtcd() {
	id, err := strconv.ParseInt(*fid, 0, 64)
	if err != nil {
		log.Fatal(err)
	}
	if id == raft.None {
		log.Fatalf("etcd: cannot use None(%d) as etcdserver id", raft.None)
	}
	if peers.Pick(id) == "" {
		log.Fatalf("%#x=<addr> must be specified in peers", id)
	}

	if *dir == "" {
		*dir = fmt.Sprintf("%v_etcd_data", *fid)
		log.Printf("main: no data-dir is given, using default data-dir ./%s", *dir)
	}
	if err := os.MkdirAll(*dir, privateDirMode); err != nil {
		log.Fatalf("main: cannot create data directory: %v", err)
	}

	n, w := startRaft(id, peers.IDs(), path.Join(*dir, "wal"))
	s := &etcdserver.EtcdServer{
		Store:      store.New(),
		Node:       n,
		Save:       w.Save,
		Send:       etcdhttp.Sender(*peers),
		Ticker:     time.Tick(100 * time.Millisecond),
		SyncTicker: time.Tick(500 * time.Millisecond),
	}
	s.Start()

	ch := etcdhttp.NewClientHandler(s, *peers, *timeout)
	ph := etcdhttp.NewPeerHandler(s)

	// Start the peer server in a goroutine
	go func() {
		log.Print("Listening for peers on ", *paddr)
		log.Fatal(http.ListenAndServe(*paddr, ph))
	}()

	// Start a client server goroutine for each listen address
	for _, addr := range *addrs {
		addr := addr
		go func() {
			log.Print("Listening for client requests on ", addr)
			log.Fatal(http.ListenAndServe(addr, ch))
		}()
	}
}
Example 9: TestSet
func TestSet(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	st := store.New()
	n := raft.Start(1, []int64{1}, 0, 0)
	n.Campaign(ctx)

	srv := &etcdserver.Server{
		Node:  n,
		Store: st,
		Send:  etcdserver.SendFunc(nopSend),
		Save:  func(st raftpb.State, ents []raftpb.Entry) {},
	}
	etcdserver.Start(srv)
	defer srv.Stop()

	h := Handler{
		Timeout: time.Hour,
		Server:  srv,
	}
	s := httptest.NewServer(h)
	defer s.Close()

	resp, err := http.PostForm(s.URL+"/v2/keys/foo", url.Values{"value": {"bar"}})
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != 201 {
		t.Errorf("StatusCode = %d, expected %d", resp.StatusCode, 201)
	}

	g := new(store.Event)
	if err := json.NewDecoder(resp.Body).Decode(&g); err != nil {
		t.Fatal(err)
	}
	w := &store.NodeExtern{
		Key:           "/foo/1",
		Value:         stringp("bar"),
		ModifiedIndex: 1,
		CreatedIndex:  1,
	}
	if !reflect.DeepEqual(g.Node, w) {
		t.Errorf("g = %+v, want %+v", g.Node, w)
	}
}
Example 10: TestRemoveMember
// TestRemoveMember tests that RemoveMember can propose and perform node removal.
func TestRemoveMember(t *testing.T) {
	n := newNodeConfChangeCommitterRecorder()
	n.readyc <- raft.Ready{
		SoftState: &raft.SoftState{RaftState: raft.StateLeader},
	}
	cl := newTestCluster(nil)
	st := store.New()
	cl.SetStore(store.New())
	cl.AddMember(&membership.Member{ID: 1234})
	s := &EtcdServer{
		r: raftNode{
			Node:        n,
			raftStorage: raft.NewMemoryStorage(),
			storage:     mockstorage.NewStorageRecorder(""),
			transport:   rafthttp.NewNopTransporter(),
		},
		cfg:      &ServerConfig{},
		store:    st,
		cluster:  cl,
		reqIDGen: idutil.NewGenerator(0, time.Time{}),
	}
	s.start()
	err := s.RemoveMember(context.TODO(), 1234)
	gaction := n.Action()
	s.Stop()

	if err != nil {
		t.Fatalf("RemoveMember error: %v", err)
	}
	wactions := []testutil.Action{{Name: "ProposeConfChange:ConfChangeRemoveNode"}, {Name: "ApplyConfChange:ConfChangeRemoveNode"}}
	if !reflect.DeepEqual(gaction, wactions) {
		t.Errorf("action = %v, want %v", gaction, wactions)
	}
	if cl.Member(1234) != nil {
		t.Errorf("member with id 1234 is not removed")
	}
}
Example 11: handleImportSnap
func handleImportSnap(c *cli.Context) error {
	d, err := ioutil.ReadFile(c.String("snap"))
	if err != nil {
		if c.String("snap") == "" {
			fmt.Printf("no snapshot file provided (use --snap)\n")
		} else {
			fmt.Printf("cannot read snapshot file %s\n", c.String("snap"))
		}
		os.Exit(1)
	}

	st := store.New()
	// Recover the in-memory store from the snapshot; a bad snapshot aborts the import.
	if err = st.Recovery(d); err != nil {
		handleError(ExitServerError, err)
	}

	wg := &sync.WaitGroup{}
	setc := make(chan set)
	concurrent := c.Int("c")
	fmt.Printf("starting to import snapshot %s with %d clients\n", c.String("snap"), concurrent)
	for i := 0; i < concurrent; i++ {
		go runSet(mustNewKeyAPI(c), setc, wg)
	}

	all, err := st.Get("/", true, true)
	if err != nil {
		handleError(ExitServerError, err)
	}
	n := copyKeys(all.Node, setc)

	hiddens := c.StringSlice("hidden")
	for _, h := range hiddens {
		allh, err := st.Get(h, true, true)
		if err != nil {
			handleError(ExitServerError, err)
		}
		n += copyKeys(allh.Node, setc)
	}
	close(setc)
	wg.Wait()
	fmt.Printf("finished importing %d keys\n", n)
	return nil
}
Example 12: startEtcd
func startEtcd() http.Handler {
	id, err := strconv.ParseInt(*fid, 0, 64)
	if err != nil {
		log.Fatal(err)
	}
	if id == raft.None {
		log.Fatalf("etcd: cannot use None(%d) as etcdserver id", raft.None)
	}
	if peers.Pick(id) == "" {
		log.Fatalf("%#x=<addr> must be specified in peers", id)
	}

	if *dir == "" {
		*dir = fmt.Sprintf("%v_etcd_data", *fid)
		log.Printf("main: no data-dir is given, using default data-dir ./%s", *dir)
	}
	if err := os.MkdirAll(*dir, privateDirMode); err != nil {
		log.Fatalf("main: cannot create data directory: %v", err)
	}

	n, w := startRaft(id, peers.IDs(), path.Join(*dir, "wal"))
	tk := time.NewTicker(100 * time.Millisecond)
	s := &etcdserver.Server{
		Store:  store.New(),
		Node:   n,
		Save:   w.Save,
		Send:   etcdhttp.Sender(*peers),
		Ticker: tk.C,
	}
	etcdserver.Start(s)
	h := etcdhttp.Handler{
		Timeout: *timeout,
		Server:  s,
		Peers:   *peers,
	}
	return &h
}
Example 13: TestApplySnapshotAndCommittedEntries
// TestApplySnapshotAndCommittedEntries tests that server applies snapshot
// first and then committed entries.
func TestApplySnapshotAndCommittedEntries(t *testing.T) {
	n := newNopReadyNode()
	st := store.NewRecorder()
	cl := newCluster("abc")
	cl.SetStore(store.New())
	storage := raft.NewMemoryStorage()
	s := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        n,
			storage:     &storageRecorder{},
			raftStorage: storage,
			transport:   rafthttp.NewNopTransporter(),
		},
		store:   st,
		cluster: cl,
	}
	s.start()
	req := &pb.Request{Method: "QGET"}
	n.readyc <- raft.Ready{
		Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}},
		CommittedEntries: []raftpb.Entry{
			{Index: 2, Data: pbutil.MustMarshal(req)},
		},
	}
	// make goroutines move forward to receive snapshot
	actions, _ := st.Wait(2)
	s.Stop()

	if len(actions) != 2 {
		t.Fatalf("len(actions) = %d, want 2", len(actions))
	}
	if actions[0].Name != "Recovery" {
		t.Errorf("actions[0] = %s, want %s", actions[0].Name, "Recovery")
	}
	if actions[1].Name != "Get" {
		t.Errorf("actions[1] = %s, want %s", actions[1].Name, "Get")
	}
}
Example 14: main
func main() {
	flag.Parse()

	id, err := strconv.ParseInt(*fid, 0, 64)
	if err != nil {
		log.Fatal(err)
	}
	if peers.Pick(id) == "" {
		log.Fatalf("%#x=<addr> must be specified in peers", id)
	}

	if *dir == "" {
		*dir = fmt.Sprintf("%v_etcd_data", *fid)
		log.Printf("main: no data-dir is given, using default data-dir ./%s", *dir)
	}
	if err := os.MkdirAll(*dir, privateDirMode); err != nil {
		log.Fatalf("main: cannot create data directory: %v", err)
	}

	n, w := startRaft(id, peers.Ids(), path.Join(*dir, "wal"))
	tk := time.NewTicker(100 * time.Millisecond)
	s := &etcdserver.Server{
		Store:  store.New(),
		Node:   n,
		Save:   w.Save,
		Send:   etcdhttp.Sender(*peers),
		Ticker: tk.C,
	}
	etcdserver.Start(s)
	h := &etcdhttp.Handler{
		Timeout: *timeout,
		Server:  s,
	}
	http.Handle("/", h)
	log.Fatal(http.ListenAndServe(*laddr, nil))
}
Example 15: TestRecvSnapshot
// TestRecvSnapshot tests that when the server receives a snapshot from the
// raft leader, it triggers storage.SaveSnap and store.Recovery.
func TestRecvSnapshot(t *testing.T) {
	n := newNopReadyNode()
	st := store.NewRecorder()
	p := &storageRecorder{}
	cl := newCluster("abc")
	cl.SetStore(store.New())
	s := &EtcdServer{
		cfg: &ServerConfig{},
		r: raftNode{
			Node:        n,
			transport:   rafthttp.NewNopTransporter(),
			storage:     p,
			raftStorage: raft.NewMemoryStorage(),
		},
		store:   st,
		cluster: cl,
	}
	s.start()
	n.readyc <- raft.Ready{Snapshot: raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{Index: 1}}}

	// wait for actions to happen on the storage
	for len(p.Action()) == 0 {
		time.Sleep(10 * time.Millisecond)
	}
	s.Stop()

	wactions := []testutil.Action{{Name: "Recovery"}}
	if g := st.Action(); !reflect.DeepEqual(g, wactions) {
		t.Errorf("store action = %v, want %v", g, wactions)
	}
	wactions = []testutil.Action{{Name: "SaveSnap"}, {Name: "Save"}}
	if g := p.Action(); !reflect.DeepEqual(g, wactions) {
		t.Errorf("storage action = %v, want %v", g, wactions)
	}
}