This page collects typical usage examples of the Golang function NewClusterV3 from the package github.com/coreos/etcd/integration. If you are wondering what NewClusterV3 does, how to call it, or how it is normally used, the curated examples below should help.
A total of 15 NewClusterV3 code examples are shown below, sorted by popularity by default.
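Before the individual examples, here is a minimal sketch of the pattern they all share: NewClusterV3 starts an in-process etcd test cluster, Terminate tears it down, and RandClient (or Client(i)) hands back a ready-made clientv3 client for issuing requests. This is only an illustrative sketch: the test name TestNewClusterV3Basic is hypothetical, and the import paths (clientv3, pkg/testutil, golang.org/x/net/context) are assumptions matching the era of these examples and may differ between etcd versions.

package integration_test

import (
    "testing"

    "github.com/coreos/etcd/clientv3"
    "github.com/coreos/etcd/integration"
    "github.com/coreos/etcd/pkg/testutil"

    "golang.org/x/net/context" // assumption: older etcd code used this path; newer code uses the standard "context"
)

// TestNewClusterV3Basic (hypothetical) shows the typical NewClusterV3 lifecycle:
// start a 1-member in-process cluster, do a Put/Get round trip through one of
// its pre-built clients, then terminate the cluster.
func TestNewClusterV3Basic(t *testing.T) {
    defer testutil.AfterTest(t)

    // Start the test cluster and make sure it is torn down with the test.
    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
    defer clus.Terminate(t)

    // RandClient returns one of the clients the cluster already created.
    kv := clientv3.NewKV(clus.RandClient())

    if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
        t.Fatalf("put failed: %v", err)
    }
    resp, err := kv.Get(context.TODO(), "foo")
    if err != nil {
        t.Fatalf("get failed: %v", err)
    }
    if len(resp.Kvs) != 1 || string(resp.Kvs[0].Value) != "bar" {
        t.Fatalf("unexpected get response: %+v", resp.Kvs)
    }
}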
Example 1: TestKVDelete
func TestKVDelete(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    kv := clientv3.NewKV(clus.RandClient())
    ctx := context.TODO()

    presp, err := kv.Put(ctx, "foo", "")
    if err != nil {
        t.Fatalf("couldn't put 'foo' (%v)", err)
    }
    if presp.Header.Revision != 2 {
        t.Fatalf("presp.Header.Revision got %d, want %d", presp.Header.Revision, 2)
    }

    resp, err := kv.Delete(ctx, "foo")
    if err != nil {
        t.Fatalf("couldn't delete key (%v)", err)
    }
    if resp.Header.Revision != 3 {
        t.Fatalf("resp.Header.Revision got %d, want %d", resp.Header.Revision, 3)
    }

    gresp, err := kv.Get(ctx, "foo")
    if err != nil {
        t.Fatalf("couldn't get key (%v)", err)
    }
    if len(gresp.Kvs) > 0 {
        t.Fatalf("gresp.Kvs got %+v, want none", gresp.Kvs)
    }
}
Example 2: TestDialSetEndpoints
// TestDialSetEndpoints ensures SetEndpoints can replace unavailable endpoints with available ones.
func TestDialSetEndpoints(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    // get endpoint list
    eps := make([]string, 3)
    for i := range eps {
        eps[i] = clus.Members[i].GRPCAddr()
    }

    toKill := rand.Intn(len(eps))
    cfg := clientv3.Config{Endpoints: []string{eps[toKill]}, DialTimeout: 1 * time.Second}
    cli, err := clientv3.New(cfg)
    if err != nil {
        t.Fatal(err)
    }
    defer cli.Close()

    // make a dead node
    clus.Members[toKill].Stop(t)
    clus.WaitLeader(t)

    // update client with available endpoints
    cli.SetEndpoints(eps[(toKill+1)%3])
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    if _, err = cli.Get(ctx, "foo", clientv3.WithSerializable()); err != nil {
        t.Fatal(err)
    }
    cancel()
}
Example 3: TestTxnWriteFail
func TestTxnWriteFail(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    kv := clientv3.NewKV(clus.Client(0))

    clus.Members[0].Stop(t)
    <-clus.Members[0].StopNotify()

    resp, err := kv.Txn().Then(clientv3.OpPut("foo", "bar", 0)).Commit()
    if err == nil {
        t.Fatalf("expected error, got response %v", resp)
    }

    // reconnect so cluster terminate doesn't complain about double-close
    clus.Members[0].Restart(t)

    // and ensure the put didn't take
    gresp, gerr := kv.Get("foo", 0)
    if gerr != nil {
        t.Fatal(gerr)
    }
    if len(gresp.Kvs) != 0 {
        t.Fatalf("expected no keys, got %v", gresp.Kvs)
    }
}
Example 4: TestWatchWithCreatedNotificationDropConn
// TestWatchWithCreatedNotificationDropConn ensures that
// a watcher with created notify does not post duplicate
// created events from disconnect.
func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
    cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
    defer cluster.Terminate(t)

    client := cluster.RandClient()
    wch := client.Watch(context.Background(), "a", clientv3.WithCreatedNotify())
    resp := <-wch
    if !resp.Created {
        t.Fatalf("expected created event, got %v", resp)
    }

    cluster.Members[0].DropConnections()

    // try to receive from watch channel again
    // ensure it doesn't post another createNotify
    select {
    case wresp := <-wch:
        t.Fatalf("got unexpected watch response: %+v\n", wresp)
    case <-time.After(time.Second):
        // watcher may not reconnect by the time it hits the select,
        // so it wouldn't have a chance to filter out the second create event
    }
}
Example 5: TestKVPutStoppedServerAndClose
// TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
func TestKVPutStoppedServerAndClose(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
    defer clus.Terminate(t)

    cli := clus.Client(0)
    clus.Members[0].Stop(t)

    ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
    // get retries on all errors.
    // so here we use it to eat the potential broken pipe error for the next put.
    // grpc client might see a broken pipe error when we issue the get request before
    // grpc finds out the original connection is down due to the member shutdown.
    _, err := cli.Get(ctx, "abc")
    cancel()
    if !strings.Contains(err.Error(), "context deadline") {
        t.Fatal(err)
    }

    // this Put fails and triggers an asynchronous connection retry
    _, err = cli.Put(ctx, "abc", "123")
    cancel()
    if !strings.Contains(err.Error(), "context deadline") {
        t.Fatal(err)
    }
}
Example 6: TestTLSConnection
func TestTLSConnection(t *testing.T) {
    certFile, keyFile, caFile := configureTLSCerts(t)
    tlsInfo := &transport.TLSInfo{
        CertFile: certFile,
        KeyFile:  keyFile,
        CAFile:   caFile,
    }

    cluster := integration.NewClusterV3(t, &integration.ClusterConfig{
        Size:      1,
        ClientTLS: tlsInfo,
    })
    defer cluster.Terminate(t)

    cfg := storagebackend.Config{
        Type:       storagebackend.StorageTypeETCD3,
        ServerList: []string{cluster.Members[0].GRPCAddr()},
        CertFile:   certFile,
        KeyFile:    keyFile,
        CAFile:     caFile,
        Codec:      testapi.Default.Codec(),
    }
    storage, err := newETCD3Storage(cfg)
    if err != nil {
        t.Fatal(err)
    }
    err = storage.Create(context.TODO(), "/abc", &api.Pod{}, nil, 0)
    if err != nil {
        t.Fatalf("Create failed: %v", err)
    }
}
Example 7: TestKVPutWithRequireLeader
func TestKVPutWithRequireLeader(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    clus.Members[1].Stop(t)
    clus.Members[2].Stop(t)

    // wait for election timeout, then member[0] will not have a leader.
    var (
        electionTicks = 10
        tickDuration  = 10 * time.Millisecond
    )
    time.Sleep(time.Duration(3*electionTicks) * tickDuration)

    kv := clientv3.NewKV(clus.Client(0))
    _, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
    if err != rpctypes.ErrNoLeader {
        t.Fatal(err)
    }

    // clients may give timeout errors since the members are stopped; take
    // the clients so that terminating the cluster won't complain
    clus.Client(1).Close()
    clus.Client(2).Close()
    clus.TakeClient(1)
    clus.TakeClient(2)
}
Example 8: TestKVPutError
func TestKVPutError(t *testing.T) {
    defer testutil.AfterTest(t)

    var (
        maxReqBytes = 1.5 * 1024 * 1024
        quota       = int64(maxReqBytes * 1.2)
    )
    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1, QuotaBackendBytes: quota})
    defer clus.Terminate(t)

    kv := clientv3.NewKV(clus.RandClient())
    ctx := context.TODO()

    _, err := kv.Put(ctx, "", "bar")
    if err != rpctypes.ErrEmptyKey {
        t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
    }

    _, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100))) // 1.5MB
    if err != rpctypes.ErrRequestTooLarge {
        t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
    }

    _, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
    if err != nil { // below quota
        t.Fatal(err)
    }

    time.Sleep(500 * time.Millisecond) // give enough time for commit

    _, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
    if err != rpctypes.ErrNoSpace { // over quota
        t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
    }
}
Example 9: TestCompactConflict
// TestCompactConflict tests that two compactors (Let's use C1, C2) are trying to compact etcd cluster with the same
// logical time.
// - C1 compacts first. It will succeed.
// - C2 compacts after. It will fail. But it will get latest logical time, which should be larger by one.
func TestCompactConflict(t *testing.T) {
    cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
    defer cluster.Terminate(t)

    client := cluster.RandClient()
    ctx := context.Background()

    putResp, err := client.Put(ctx, "/somekey", "data")
    if err != nil {
        t.Fatalf("Put failed: %v", err)
    }

    // Compact first. It would do the compaction and return compact time which is incremented by 1.
    curTime, _, err := compact(ctx, client, 0, putResp.Header.Revision)
    if err != nil {
        t.Fatalf("compact failed: %v", err)
    }
    if curTime != 1 {
        t.Errorf("Expect current logical time = 1, get = %v", curTime)
    }

    // Compact again with the same parameters. It won't do compaction but return the latest compact time.
    curTime2, _, err := compact(ctx, client, 0, putResp.Header.Revision)
    if err != nil {
        t.Fatalf("compact failed: %v", err)
    }
    if curTime != curTime2 {
        t.Errorf("Unexpected curTime (%v) != curTime2 (%v)", curTime, curTime2)
    }
}
Example 10: TestKVPutOneEndpointDown
// TestKVPutOneEndpointDown ensures a client can connect and get even if one endpoint is down
func TestKVPutOneEndpointDown(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    // get endpoint list
    eps := make([]string, 3)
    for i := range eps {
        eps[i] = clus.Members[i].GRPCAddr()
    }

    // make a dead node
    clus.Members[rand.Intn(len(eps))].Stop(t)

    // try to connect with dead node in the endpoint list
    cfg := clientv3.Config{Endpoints: eps, DialTimeout: 1 * time.Second}
    cli, err := clientv3.New(cfg)
    if err != nil {
        t.Fatal(err)
    }
    defer cli.Close()

    ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
    if _, err := cli.Get(ctx, "abc", clientv3.WithSerializable()); err != nil {
        t.Fatal(err)
    }
    cancel()
}
Example 11: TestLeaseRevoke
func TestLeaseRevoke(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    lapi := clientv3.NewLease(clus.RandClient())
    defer lapi.Close()
    kv := clientv3.NewKV(clus.RandClient())

    resp, err := lapi.Create(context.Background(), 10)
    if err != nil {
        t.Errorf("failed to create lease %v", err)
    }

    _, err = lapi.Revoke(context.Background(), lease.LeaseID(resp.ID))
    if err != nil {
        t.Errorf("failed to revoke lease %v", err)
    }

    _, err = kv.Put(context.TODO(), "foo", "bar", clientv3.WithLease(lease.LeaseID(resp.ID)))
    if err != v3rpc.ErrLeaseNotFound {
        t.Fatalf("err = %v, want %v", err, v3rpc.ErrLeaseNotFound)
    }
}
Example 12: TestCompact
func TestCompact(t *testing.T) {
    cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
    defer cluster.Terminate(t)

    client := cluster.RandClient()
    ctx := context.Background()

    putResp, err := client.Put(ctx, "/somekey", "data")
    if err != nil {
        t.Fatalf("Put failed: %v", err)
    }
    putResp1, err := client.Put(ctx, "/somekey", "data2")
    if err != nil {
        t.Fatalf("Put failed: %v", err)
    }

    _, _, err = compact(ctx, client, 0, putResp1.Header.Revision)
    if err != nil {
        t.Fatalf("compact failed: %v", err)
    }

    obj, err := client.Get(ctx, "/somekey", clientv3.WithRev(putResp.Header.Revision))
    if err != etcdrpc.ErrCompacted {
        t.Errorf("Expecting ErrCompacted, but get=%v err=%v", obj, err)
    }
}
Example 13: TestMemberRemove
func TestMemberRemove(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    capi := clientv3.NewCluster(clus.Client(1))

    resp, err := capi.MemberList(context.Background())
    if err != nil {
        t.Fatalf("failed to list member %v", err)
    }

    _, err = capi.MemberRemove(context.Background(), resp.Members[0].ID)
    if err != nil {
        t.Fatalf("failed to remove member %v", err)
    }

    resp, err = capi.MemberList(context.Background())
    if err != nil {
        t.Fatalf("failed to list member %v", err)
    }
    if len(resp.Members) != 2 {
        t.Errorf("number of members = %d, want %d", len(resp.Members), 2)
    }
}
Example 14: TestWatchCancelAndCloseClient
// TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
// closing the client does not return a client closing error.
func TestWatchCancelAndCloseClient(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
    defer clus.Terminate(t)

    cli := clus.Client(0)
    ctx, cancel := context.WithCancel(context.Background())
    wch := cli.Watch(ctx, "abc")

    donec := make(chan struct{})
    go func() {
        defer close(donec)
        select {
        case wr, ok := <-wch:
            if ok {
                t.Fatalf("expected closed watch after cancel(), got resp=%+v err=%v", wr, wr.Err())
            }
        case <-time.After(5 * time.Second):
            t.Fatal("timed out waiting for closed channel")
        }
    }()

    cancel()
    if err := cli.Close(); err != nil {
        t.Fatal(err)
    }
    <-donec
    clus.TakeClient(0)
}
Example 15: TestLeaseKeepAlive
func TestLeaseKeepAlive(t *testing.T) {
    defer testutil.AfterTest(t)

    clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
    defer clus.Terminate(t)

    lapi := clientv3.NewLease(clus.RandClient())

    resp, err := lapi.Create(context.Background(), 10)
    if err != nil {
        t.Errorf("failed to create lease %v", err)
    }

    rc, kerr := lapi.KeepAlive(context.Background(), lease.LeaseID(resp.ID))
    if kerr != nil {
        t.Errorf("failed to keepalive lease %v", kerr)
    }

    kresp, ok := <-rc
    if !ok {
        t.Errorf("chan is closed, want not closed")
    }
    if kresp.ID != resp.ID {
        t.Errorf("ID = %x, want %x", kresp.ID, resp.ID)
    }

    lapi.Close()

    _, ok = <-rc
    if ok {
        t.Errorf("chan is not closed, want lease Close() closes chan")
    }
}