This page collects typical usage examples of the Golang Header.RangeID method from github.com/cockroachdb/cockroach/roachpb. If you have been wondering what Header.RangeID does and how to use it in practice, the hand-picked code examples below may help. You can also read further about the enclosing type, github.com/cockroachdb/cockroach/roachpb.Header.
Two code examples of the Header.RangeID method are shown below, ordered by popularity by default.
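Before the full examples, here is the core pattern pulled out as a minimal sketch. It reuses the test helpers that appear in the examples below (createTestStore, putArgs, client.SendWrappedWith) and follows the older API of Example 1; treat it as an illustrative fragment under those assumptions rather than code that compiles against a current CockroachDB tree.

// Minimal sketch only; assumes the test helpers and the older request API
// used in Example 1 below.
store, stopper := createTestStore(t)
defer stopper.Stop()

args := putArgs(roachpb.Key("a"), []byte("value"))

var h roachpb.Header
h.Timestamp = store.Clock().Now()
// Header.RangeID addresses the request to a specific range: look up the
// replica that owns the key and copy its range's ID into the header.
h.RangeID = store.LookupReplica(keys.Addr(args.Key), nil).Desc().RangeID

// Send the Put with that header; the store executes it on the range named
// by h.RangeID.
if _, err := client.SendWrappedWith(store, nil, h, &args); err != nil {
    t.Fatal(err)
}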
Example 1: TestStoreSplitReadRace
// TestStoreSplitReadRace prevents regression of #3148. It begins a couple of
// read requests and lets them complete while a split is happening; the reads
// hit the second half of the split. If the split happens non-atomically with
// respect to the reads (and in particular their update of the timestamp
// cache), then some of them may not be reflected in the timestamp cache of the
// new range, in which case this test would fail.
func TestStoreSplitReadRace(t *testing.T) {
    defer leaktest.AfterTest(t)
    // Package-level command filter; it must be reset when the test ends.
    defer func() { storage.TestingCommandFilter = nil }()
    splitKey := roachpb.Key("a")
    key := func(i int) roachpb.Key {
        return append(splitKey.Next(), []byte(fmt.Sprintf("%03d", i))...)
    }

    getContinues := make(chan struct{})
    var getStarted sync.WaitGroup
    // The filter makes each Get against the right-hand side of the future
    // split report that it has started and then block until the
    // EndTransaction carrying the matching split trigger shows up.
    storage.TestingCommandFilter = func(_ roachpb.StoreID, args roachpb.Request, h roachpb.Header) error {
        if et, ok := args.(*roachpb.EndTransactionRequest); ok {
            st := et.InternalCommitTrigger.GetSplitTrigger()
            if st == nil || !st.UpdatedDesc.EndKey.Equal(splitKey) {
                return nil
            }
            close(getContinues)
        } else if args.Method() == roachpb.Get &&
            bytes.HasPrefix(args.Header().Key, splitKey.Next()) {
            getStarted.Done()
            <-getContinues
        }
        return nil
    }

    store, stopper := createTestStore(t)
    defer stopper.Stop()

    now := store.Clock().Now()
    var wg sync.WaitGroup
    ts := func(i int) roachpb.Timestamp {
        return now.Add(0, int32(1000+i))
    }

    const num = 10

    // Start the Gets at increasing timestamps; each blocks in the command
    // filter until the split commits.
    for i := 0; i < num; i++ {
        wg.Add(1)
        getStarted.Add(1)
        go func(i int) {
            defer wg.Done()
            args := getArgs(key(i))
            var h roachpb.Header
            h.Timestamp = ts(i)
            if _, err := client.SendWrappedWith(rg1(store), nil, h, &args); err != nil {
                t.Fatal(err)
            }
        }(i)
    }
    getStarted.Wait()

    // Run the split while the Gets are blocked in the filter.
    wg.Add(1)
    func() {
        defer wg.Done()
        args := adminSplitArgs(roachpb.KeyMin, splitKey)
        if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
            t.Fatal(err)
        }
    }()
    wg.Wait()

    // Write to each key at the original (lower) timestamp, addressing the
    // new right-hand range explicitly via Header.RangeID. The timestamp
    // cache carried over by the split must force each Put above the
    // corresponding Get's timestamp.
    for i := 0; i < num; i++ {
        var h roachpb.Header
        h.Timestamp = now
        args := putArgs(key(i), []byte("foo"))
        h.RangeID = store.LookupReplica(keys.Addr(args.Key), nil).Desc().RangeID
        reply, err := client.SendWrappedWith(store, nil, h, &args)
        if err != nil {
            t.Fatal(err)
        }
        if reply.Header().Timestamp.Less(ts(i)) {
            t.Fatalf("%d: expected Put to be forced higher than %s by timestamp caches, but wrote at %s", i, ts(i), reply.Header().Timestamp)
        }
    }
}
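The line that exercises Header.RangeID is in the final loop: h.RangeID = store.LookupReplica(keys.Addr(args.Key), nil).Desc().RangeID addresses each Put directly to the newly created right-hand range, so the asserted response timestamp shows whether the split carried the timestamp cache across. Example 2 is the same test from a later revision of the repository: the request path reports *roachpb.Error instead of error, timestamps live in the hlc package, keys.Addr returns an error, and the command filter is installed through the store context's TestingKnobs rather than the package-level storage.TestingCommandFilter hook.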
Example 2: TestStoreSplitReadRace
// TestStoreSplitReadRace prevents regression of #3148. It begins a couple of
// read requests and lets them complete while a split is happening; the reads
// hit the second half of the split. If the split happens non-atomically with
// respect to the reads (and in particular their update of the timestamp
// cache), then some of them may not be reflected in the timestamp cache of the
// new range, in which case this test would fail.
func TestStoreSplitReadRace(t *testing.T) {
    defer leaktest.AfterTest(t)()
    defer config.TestingDisableTableSplits()()
    splitKey := roachpb.Key("a")
    key := func(i int) roachpb.Key {
        splitCopy := append([]byte(nil), splitKey.Next()...)
        return append(splitCopy, []byte(fmt.Sprintf("%03d", i))...)
    }

    getContinues := make(chan struct{})
    var getStarted sync.WaitGroup
    // The command filter is now installed through the store context's
    // testing knobs rather than the package-level hook used in Example 1.
    sCtx := storage.TestStoreContext()
    sCtx.TestingKnobs.TestingCommandFilter =
        func(filterArgs storagebase.FilterArgs) *roachpb.Error {
            if et, ok := filterArgs.Req.(*roachpb.EndTransactionRequest); ok {
                st := et.InternalCommitTrigger.GetSplitTrigger()
                if st == nil || !st.UpdatedDesc.EndKey.Equal(splitKey) {
                    return nil
                }
                close(getContinues)
            } else if filterArgs.Req.Method() == roachpb.Get &&
                bytes.HasPrefix(filterArgs.Req.Header().Key, splitKey.Next()) {
                getStarted.Done()
                <-getContinues
            }
            return nil
        }
    store, stopper, _ := createTestStoreWithContext(t, &sCtx)
    defer stopper.Stop()

    now := store.Clock().Now()
    var wg sync.WaitGroup
    ts := func(i int) hlc.Timestamp {
        return now.Add(0, int32(1000+i))
    }

    const num = 10

    // Start the Gets at increasing timestamps; each blocks in the command
    // filter until the split commits.
    for i := 0; i < num; i++ {
        wg.Add(1)
        getStarted.Add(1)
        go func(i int) {
            defer wg.Done()
            args := getArgs(key(i))
            var h roachpb.Header
            h.Timestamp = ts(i)
            if _, pErr := client.SendWrappedWith(rg1(store), nil, h, &args); pErr != nil {
                t.Fatal(pErr)
            }
        }(i)
    }
    getStarted.Wait()

    // Run the split while the Gets are blocked in the filter.
    wg.Add(1)
    func() {
        defer wg.Done()
        args := adminSplitArgs(roachpb.KeyMin, splitKey)
        if _, pErr := client.SendWrapped(rg1(store), nil, &args); pErr != nil {
            t.Fatal(pErr)
        }
    }()
    wg.Wait()

    // Write to each key at the original (lower) timestamp, addressing the
    // new right-hand range explicitly via Header.RangeID. The timestamp
    // cache carried over by the split must force each Put above the
    // corresponding Get's timestamp.
    for i := 0; i < num; i++ {
        var h roachpb.Header
        h.Timestamp = now
        args := putArgs(key(i), []byte("foo"))
        keyAddr, err := keys.Addr(args.Key)
        if err != nil {
            t.Fatal(err)
        }
        h.RangeID = store.LookupReplica(keyAddr, nil).RangeID
        _, respH, pErr := storage.SendWrapped(store, context.Background(), h, &args)
        if pErr != nil {
            t.Fatal(pErr)
        }
        if respH.Timestamp.Less(ts(i)) {
            t.Fatalf("%d: expected Put to be forced higher than %s by timestamp caches, but wrote at %s", i, ts(i), respH.Timestamp)
        }
    }
}