This article collects typical usage examples of the WithRev function from the Golang package github.com/coreos/etcd/clientv3. If you have been wondering what exactly WithRev does and how to use it, the curated examples below should help.
The following shows 15 code examples of the WithRev function, ordered by popularity.
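Before the collected examples, here is a minimal self-contained sketch of what WithRev means (the endpoint and key names are placeholders, not taken from the examples below): a Get issued with WithRev(n) reads the keyspace as it was at revision n, and a Watch started with WithRev(n) replays events beginning at revision n.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// write the key twice so it has history
	first, err := cli.Put(context.TODO(), "demo", "v1")
	if err != nil {
		log.Fatal(err)
	}
	if _, err = cli.Put(context.TODO(), "demo", "v2"); err != nil {
		log.Fatal(err)
	}

	// read the key as of the first put's revision: prints "v1", not "v2"
	resp, err := cli.Get(context.TODO(), "demo", clientv3.WithRev(first.Header.Revision))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", resp.Kvs[0].Value)

	// watch from that revision: events from that revision onward are replayed
	wch := cli.Watch(context.TODO(), "demo", clientv3.WithRev(first.Header.Revision))
	wr := <-wch
	for _, ev := range wr.Events {
		fmt.Printf("%s %s -> %s\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}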
Example 1: observe
func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
	client := e.session.Client()
	defer close(ch)
	for {
		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
		if err != nil {
			return
		}
		var kv *mvccpb.KeyValue
		cctx, cancel := context.WithCancel(ctx)
		if len(resp.Kvs) == 0 {
			// wait for the first key to be put on the prefix
			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
			wch := client.Watch(cctx, e.keyPrefix, opts...)
			for kv == nil {
				wr, ok := <-wch
				if !ok || wr.Err() != nil {
					cancel()
					return
				}
				// only accept PUTs; a DELETE would make observe() spin
				for _, ev := range wr.Events {
					if ev.Type == mvccpb.PUT {
						kv = ev.Kv
						break
					}
				}
			}
		} else {
			kv = resp.Kvs[0]
		}
		// stream the leader key's values, starting from its last modification
		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(kv.ModRevision))
		keyDeleted := false
		for !keyDeleted {
			wr, ok := <-wch
			if !ok {
				return
			}
			for _, ev := range wr.Events {
				if ev.Type == mvccpb.DELETE {
					keyDeleted = true
					break
				}
				resp.Header = &wr.Header
				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
				select {
				case ch <- *resp:
				case <-cctx.Done():
					return
				}
			}
		}
		cancel()
	}
}
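For context, a hedged sketch of how this stream is typically consumed through the exported Observe wrapper (whether NewElection takes a client or a concurrency.Session varies across etcd versions, so treat the constructor as an assumption; the prefix is a placeholder):
	e := concurrency.NewElection(client, "/my-election/") // constructor form assumed for this vintage
	for resp := range e.Observe(context.TODO()) {
		// each response carries the current leader's key-value pair
		fmt.Printf("leader value: %s\n", resp.Kvs[0].Value)
	}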
Example 2: TestWatchWithRequireLeader
// TestWatchWithRequireLeader checks that the watch channel closes when there is no leader.
func TestWatchWithRequireLeader(t *testing.T) {
	defer testutil.AfterTest(t)

	clus := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Put a key for the non-require-leader watch to read as an event.
	// The watchers will be on member[0]; put the key through member[0] to
	// ensure that it receives the update, so watching after killing quorum
	// is guaranteed to have the key.
	liveClient := clus.Client(0)
	if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}

	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)

	// wait for the election timeout, after which member[0] will not have a leader
	tickDuration := 10 * time.Millisecond
	time.Sleep(time.Duration(3*clus.Members[0].ElectionTicks) * tickDuration)

	chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
	chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1))

	select {
	case resp, ok := <-chLeader:
		if !ok {
			t.Fatalf("expected %v watch response, got closed channel", rpctypes.ErrNoLeader)
		}
		if resp.Err() != rpctypes.ErrNoLeader {
			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("watch without leader took too long to close")
	}

	select {
	case resp, ok := <-chLeader:
		if ok {
			t.Fatalf("expected closed channel, got response %v", resp)
		}
	case <-time.After(3 * time.Second):
		t.Fatal("waited too long for channel to close")
	}

	if _, ok := <-chNoLeader; !ok {
		t.Fatalf("expected response, got closed channel")
	}
}
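Distilled from this test, a hedged sketch of the consumer-side pattern (cli stands in for an existing clientv3.Client):
	// fail fast when the connected member has no leader, instead of waiting silently
	ctx := clientv3.WithRequireLeader(context.Background())
	wch := cli.Watch(ctx, "foo", clientv3.WithRev(1))
	for wr := range wch {
		if wr.Err() == rpctypes.ErrNoLeader {
			break // the server ended the watch because the member lost its leader
		}
		// process wr.Events ...
	}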
Example 3: watchCommandFunc
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
	if watchInteractive {
		watchInteractiveFunc(cmd, args)
		return
	}
	if len(args) < 1 || len(args) > 2 {
		ExitWithError(ExitBadArgs, fmt.Errorf("watch in non-interactive mode requires one or two arguments as key or prefix, with range end"))
	}
	opts := []clientv3.OpOption{clientv3.WithRev(watchRev)}
	key := args[0]
	if len(args) == 2 {
		if watchPrefix {
			ExitWithError(ExitBadArgs, fmt.Errorf("`range_end` and `--prefix` cannot be set at the same time, choose one"))
		}
		opts = append(opts, clientv3.WithRange(args[1]))
	}
	if watchPrefix {
		opts = append(opts, clientv3.WithPrefix())
	}
	c := mustClientFromCmd(cmd)
	wc := c.Watch(context.TODO(), key, opts...)
	printWatchCh(wc)
	err := c.Close()
	if err == nil {
		ExitWithError(ExitInterrupted, fmt.Errorf("watch is canceled by the server"))
	}
	ExitWithError(ExitBadConnection, err)
}
Example 4: ExampleKV_getWithRev
func ExampleKV_getWithRev() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   endpoints,
		DialTimeout: dialTimeout,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	presp, err := cli.Put(context.TODO(), "foo", "bar1")
	if err != nil {
		log.Fatal(err)
	}
	_, err = cli.Put(context.TODO(), "foo", "bar2")
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
	resp, err := cli.Get(ctx, "foo", clientv3.WithRev(presp.Header.Revision))
	cancel()
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range resp.Kvs {
		fmt.Printf("%s : %s\n", ev.Key, ev.Value)
	}
	// Output: foo : bar1
}
Example 5: Lock
func (rwm *RWMutex) Lock() error {
	rk, err := NewUniqueEphemeralKey(rwm.client, rwm.key+"/write")
	if err != nil {
		return err
	}
	rwm.myKey = rk
	for {
		// any key with a lower revision blocks the write lock;
		// WithLastRev selects the single key with the highest mod revision,
		// evaluated against the store as of the revision just before rk
		opts := append(v3.WithLastRev(), v3.WithRev(rk.Revision()-1))
		resp, err := rwm.client.Get(rwm.ctx, rwm.key, opts...)
		if err != nil {
			return err
		}
		if len(resp.Kvs) == 0 {
			// no key precedes myKey; the lock is acquired
			break
		}
		if err := rwm.waitOnLowest(); err != nil {
			return err
		}
		// re-check the new lowest key, until this one is the only one left
	}
	return nil
}
Example 6: Lock
// Lock locks the mutex with a cancellable context. If the context is cancelled
// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
func (m *Mutex) Lock(ctx context.Context) error {
	s, serr := NewSession(m.client)
	if serr != nil {
		return serr
	}
	m.myKey = fmt.Sprintf("%s/%x", m.pfx, s.Lease())
	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
	// put self in lock waiters via myKey; oldest waiter holds lock
	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
	// reuse key in case this session already holds the lock
	get := v3.OpGet(m.myKey)
	resp, err := m.client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
	if err != nil {
		return err
	}
	m.myRev = resp.Header.Revision
	if !resp.Succeeded {
		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
	}
	// wait for deletion revisions prior to myKey
	err = waitDeletes(ctx, m.client, m.pfx, v3.WithPrefix(), v3.WithRev(m.myRev-1))
	// release lock key if cancelled
	select {
	case <-ctx.Done():
		m.Unlock()
	default:
	}
	return err
}
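A hedged usage sketch for this Lock: in the vintage shown here, NewMutex takes a client and a key prefix (later versions take a concurrency.Session instead), so treat the constructor as an assumption; the prefix is a placeholder.
	m := concurrency.NewMutex(client, "/my-lock/") // constructor form assumed for this vintage
	if err := m.Lock(context.TODO()); err != nil {
		log.Fatal(err)
	}
	// ... critical section ...
	m.Unlock() // releases the lock key, matching the argument-less Unlock used above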
Example 7: watchLeader
func (c *client) watchLeader(leaderPath string, revision int64) {
	defer c.wg.Done()

WATCH:
	for {
		log.Infof("[pd] start watch pd leader on path %v, revision %v", leaderPath, revision)
		rch := c.etcdClient.Watch(context.Background(), leaderPath, clientv3.WithRev(revision))
		select {
		case resp := <-rch:
			if resp.Canceled {
				log.Warn("[pd] leader watcher canceled")
				continue WATCH
			}
			leaderAddr, rev, err := getLeader(c.etcdClient, leaderPath)
			if err != nil {
				log.Warn(err)
				continue WATCH
			}
			log.Infof("[pd] found new pd-server leader addr: %v", leaderAddr)
			c.workerMutex.Lock()
			c.worker.stop(errors.New("[pd] leader change"))
			c.worker = newRPCWorker(leaderAddr, c.clusterID)
			c.workerMutex.Unlock()
			revision = rev
		case <-c.quit:
			return
		}
	}
}
Example 8: waitDeletes
// waitDeletes efficiently waits until all keys matched by Get(key, opts...) are deleted
func waitDeletes(ctx context.Context, client *v3.Client, key string, opts ...v3.OpOption) error {
	getOpts := []v3.OpOption{v3.WithSort(v3.SortByCreatedRev, v3.SortAscend)}
	getOpts = append(getOpts, opts...)
	resp, err := client.Get(ctx, key, getOpts...)
	maxRev := int64(math.MaxInt64)
	getOpts = append(getOpts, v3.WithRev(0))
	for err == nil {
		for len(resp.Kvs) > 0 {
			i := len(resp.Kvs) - 1
			if resp.Kvs[i].CreateRevision <= maxRev {
				break
			}
			resp.Kvs = resp.Kvs[:i]
		}
		if len(resp.Kvs) == 0 {
			break
		}
		lastKV := resp.Kvs[len(resp.Kvs)-1]
		maxRev = lastKV.CreateRevision
		err = waitDelete(ctx, client, string(lastKV.Key), maxRev)
		if err != nil || len(resp.Kvs) == 1 {
			break
		}
		getOpts = append(getOpts, v3.WithLimit(int64(len(resp.Kvs)-1)))
		resp, err = client.Get(ctx, key, getOpts...)
	}
	return err
}
Example 9: NewWatchProxy
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{
		cw:           c.Watcher,
		ctx:          clientv3.WithRequireLeader(c.Ctx()),
		retryLimiter: rate.NewLimiter(rate.Limit(retryPerSecond), retryPerSecond),
		leaderc:      make(chan struct{}),
	}
	wp.ranges = newWatchRanges(wp)
	go func() {
		// a new stream without any open watchers won't catch
		// a lost leader event, so have a special watch to monitor it
		rev := int64((uint64(1) << 63) - 2) // a far-future revision, so this watch never delivers normal events
		for wp.ctx.Err() == nil {
			wch := wp.cw.Watch(wp.ctx, lostLeaderKey, clientv3.WithRev(rev))
			for range wch {
			}
			// the watch channel closed: the leader was lost; notify waiters and reset
			wp.mu.Lock()
			close(wp.leaderc)
			wp.leaderc = make(chan struct{})
			wp.mu.Unlock()
			wp.retryLimiter.Wait(wp.ctx)
		}
		wp.mu.Lock()
		<-wp.ctx.Done()
		wp.mu.Unlock()
		wp.wg.Wait()
		wp.ranges.stop()
	}()
	return wp
}
Example 10: WaitEvents
// WaitEvents waits on a key until it observes the given events and returns the final one.
func WaitEvents(c *clientv3.Client, key string, rev int64, evs []storagepb.Event_EventType) (*storagepb.Event, error) {
	wc := c.Watch(context.Background(), key, clientv3.WithRev(rev))
	if wc == nil {
		return nil, ErrNoWatcher
	}
	return waitEvents(wc, evs), nil
}
Example 11: WaitPrefixEvents
func WaitPrefixEvents(c *clientv3.Client, prefix string, rev int64, evs []mvccpb.Event_EventType) (*clientv3.Event, error) {
	wc := c.Watch(context.Background(), prefix, clientv3.WithPrefix(), clientv3.WithRev(rev))
	if wc == nil {
		return nil, ErrNoWatcher
	}
	return waitEvents(wc, evs), nil
}
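Examples 10 and 11 both delegate to an unexported waitEvents helper that this collection does not show (note they also come from different etcd vintages, hence storagepb in one and mvccpb in the other). A hypothetical reconstruction from the call sites, using the mvccpb flavor of Example 11, might look like:
	// hypothetical reconstruction of the unexported waitEvents helper: scan the
	// watch channel until the expected event sequence has been observed in order
	func waitEvents(wc clientv3.WatchChan, evs []mvccpb.Event_EventType) *clientv3.Event {
		i := 0
		for wresp := range wc {
			for _, ev := range wresp.Events {
				if ev.Type == evs[i] {
					i++
					if i == len(evs) {
						return ev
					}
				}
			}
		}
		return nil
	}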
Example 12: newWatchBroadcast
func newWatchBroadcast(wp *watchProxy, w *watcher, update func(*watchBroadcast)) *watchBroadcast {
	cctx, cancel := context.WithCancel(wp.ctx)
	wb := &watchBroadcast{
		cancel:    cancel,
		nextrev:   w.nextrev,
		receivers: make(map[*watcher]struct{}),
		donec:     make(chan struct{}),
	}
	wb.add(w)
	go func() {
		defer close(wb.donec)
		// loop because leader loss will close the channel
		for cctx.Err() == nil {
			wch := wp.cw.Watch(cctx, w.wr.key,
				clientv3.WithRange(w.wr.end),
				clientv3.WithProgressNotify(),
				clientv3.WithCreatedNotify(),
				clientv3.WithRev(wb.nextrev),
				clientv3.WithPrevKV(),
			)
			for wr := range wch {
				wb.bcast(wr)
				update(wb)
			}
			wp.retryLimiter.Wait(cctx)
		}
	}()
	return wb
}
Example 13: checkCompact
func (c *cluster) checkCompact(rev int64) error {
	if rev == 0 {
		return nil
	}
	for _, u := range c.GRPCURLs {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:   []string{u},
			DialTimeout: 5 * time.Second,
		})
		if err != nil {
			return fmt.Errorf("%v (endpoint %s)", err, u)
		}
		// watch from just below the compacted revision; the server is expected
		// to cancel the watch and report the compaction revision
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		wch := cli.Watch(ctx, "\x00", clientv3.WithFromKey(), clientv3.WithRev(rev-1))
		wr, ok := <-wch
		cancel()
		cli.Close()
		if !ok {
			return fmt.Errorf("watch channel terminated (endpoint %s)", u)
		}
		if wr.CompactRevision != rev {
			return fmt.Errorf("got compact revision %v, wanted %v (endpoint %s)", wr.CompactRevision, rev, u)
		}
	}
	return nil
}
Example 14: startWatching
// startWatching does:
// - get current objects if initialRev=0; set initialRev to current rev
// - watch on the given key and send events to process.
func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {
	if wc.initialRev == 0 {
		if err := wc.sync(); err != nil {
			glog.Errorf("failed to sync with latest state: %v", err)
			wc.sendError(err)
			return
		}
	}
	// resume watching just after the revision already covered by sync()
	opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()}
	if wc.recursive {
		opts = append(opts, clientv3.WithPrefix())
	}
	wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...)
	for wres := range wch {
		if wres.Err() != nil {
			err := wres.Err()
			// If there is an error on the server (e.g. compaction), the channel will return it before being closed.
			glog.Errorf("watch chan error: %v", err)
			wc.sendError(err)
			return
		}
		for _, e := range wres.Events {
			wc.sendEvent(parseEvent(e))
		}
	}
	// When control reaches this point, it is only possible that the client side ended the watch,
	// e.g. by cancelling the context or closing the client.
	// If this watch chan is broken and the context isn't cancelled, other goroutines will still hang.
	// We should notify the main thread that this goroutine has exited.
	close(watchClosedCh)
}
Example 15: TestCompact
func TestCompact(t *testing.T) {
	cluster := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
	defer cluster.Terminate(t)
	client := cluster.RandClient()
	ctx := context.Background()

	putResp, err := client.Put(ctx, "/somekey", "data")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}
	putResp1, err := client.Put(ctx, "/somekey", "data2")
	if err != nil {
		t.Fatalf("Put failed: %v", err)
	}

	_, _, err = compact(ctx, client, 0, putResp1.Header.Revision)
	if err != nil {
		t.Fatalf("compact failed: %v", err)
	}

	obj, err := client.Get(ctx, "/somekey", clientv3.WithRev(putResp.Header.Revision))
	if err != etcdrpc.ErrCompacted {
		t.Errorf("Expecting ErrCompacted, but get=%v err=%v", obj, err)
	}
}
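The compact helper called above is not included in this collection. As a rough, hypothetical stand-in, a simplified version built on the plain clientv3 Compact API could look like the sketch below; the real helper in the source project does more (it coordinates the compaction through a transaction), and the three return values here merely mirror the call site.
	// hypothetical, simplified stand-in for the compact helper used in TestCompact;
	// it compacts the store at rev with the plain clientv3 API and echoes the revisions back
	func compact(ctx context.Context, client *clientv3.Client, oldRev, rev int64) (int64, int64, error) {
		if _, err := client.Compact(ctx, rev); err != nil {
			return oldRev, rev, err
		}
		return oldRev, rev, nil
	}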