This article collects typical usage examples of the Golang function MVCCIterate from github.com/cockroachdb/cockroach/storage/engine: what MVCCIterate does, how to call it, and what real call sites look like.
Twelve code examples of MVCCIterate are shown below, ordered by popularity. They are drawn from different revisions of the CockroachDB source tree, so the exact signature of MVCCIterate (context argument, reverse flag, timestamp type) varies from example to example.
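All of the call sites below share one contract: MVCCIterate walks the MVCC key/value pairs in [start, end) and hands each pair to a callback; the callback returns (done bool, err error), and returning true or a non-nil error stops the scan. A minimal sketch of that contract, written against the context-taking signature used in Example 2 (older examples omit the context and reverse arguments) and assuming the same packages the surrounding examples import; countKeys is a hypothetical helper, not part of the engine package:

// countKeys is a hypothetical helper illustrating the MVCCIterate callback
// contract; the engine.Reader would come from a real store.
func countKeys(ctx context.Context, e engine.Reader, start, end roachpb.Key) (int, error) {
    var count int
    _, err := engine.MVCCIterate(ctx, e, start, end, hlc.ZeroTimestamp,
        true /* consistent */, nil /* txn */, false, /* !reverse */
        func(kv roachpb.KeyValue) (bool, error) {
            count++
            return false, nil // returning true here would stop the scan early
        })
    return count, err
}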
Example 1: Entries
// Entries implements the raft.Storage interface. Note that maxBytes is advisory
// and this method will always return at least one entry even if it exceeds
// maxBytes. Passing maxBytes equal to zero disables size checking.
// TODO(bdarnell): consider caching for recent entries, if rocksdb's builtin caching
// is insufficient.
func (r *Range) Entries(lo, hi, maxBytes uint64) ([]raftpb.Entry, error) {
    // Scan over the log to find the requested entries in the range [lo, hi),
    // stopping once we have enough.
    var ents []raftpb.Entry
    size := uint64(0)
    var ent raftpb.Entry
    scanFunc := func(kv proto.KeyValue) (bool, error) {
        err := gogoproto.Unmarshal(kv.Value.GetBytes(), &ent)
        if err != nil {
            return false, err
        }
        size += uint64(ent.Size())
        ents = append(ents, ent)
        return maxBytes > 0 && size > maxBytes, nil
    }
    _, err := engine.MVCCIterate(r.rm.Engine(),
        keys.RaftLogKey(r.Desc().RaftID, lo),
        keys.RaftLogKey(r.Desc().RaftID, hi),
        proto.ZeroTimestamp, true /* consistent */, nil /* txn */, scanFunc)
    if err != nil {
        return nil, err
    }
    // If neither the number of entries nor the size limitations had an
    // effect, we weren't able to supply everything the client wanted.
    if len(ents) != int(hi-lo) && (maxBytes == 0 || size < maxBytes) {
        return nil, raft.ErrUnavailable
    }
    return ents, nil
}
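The "at least one entry" guarantee in the doc comment falls out of the order of operations in scanFunc: the entry is appended before the size limit is tested. A standalone restatement of that step (accumulate is a hypothetical helper for illustration, not package code):

// accumulate mirrors the scanFunc above: the entry is appended first and the
// size limit is tested second, so the first entry is always kept even when
// it alone exceeds maxBytes; maxBytes == 0 disables the check entirely.
func accumulate(ents []raftpb.Entry, ent raftpb.Entry, size, maxBytes uint64) ([]raftpb.Entry, uint64, bool) {
    size += uint64(ent.Size())
    ents = append(ents, ent)
    done := maxBytes > 0 && size > maxBytes
    return ents, size, done
}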
Example 2: iterateEntries
func iterateEntries(
    e engine.Reader, rangeID roachpb.RangeID, lo, hi uint64, scanFunc func(roachpb.KeyValue) (bool, error),
) error {
    _, err := engine.MVCCIterate(
        context.Background(), e,
        keys.RaftLogKey(rangeID, lo),
        keys.RaftLogKey(rangeID, hi),
        hlc.ZeroTimestamp,
        true, /* consistent */
        nil, /* txn */
        false, /* !reverse */
        scanFunc,
    )
    return err
}
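A hedged usage sketch: any callback with the right shape can drive iterateEntries, for example to count the Raft log entries in [lo, hi). countLogEntries is hypothetical, and the engine.Reader would come from a real store:

// countLogEntries is a hypothetical caller of iterateEntries above; it
// counts the Raft log entries stored for rangeID in [lo, hi).
func countLogEntries(e engine.Reader, rangeID roachpb.RangeID, lo, hi uint64) (int, error) {
    var n int
    err := iterateEntries(e, rangeID, lo, hi,
        func(kv roachpb.KeyValue) (bool, error) {
            n++
            return false, nil // false: keep scanning
        })
    return n, err
}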
Example 3: Iterate
// Iterate walks through the sequence cache, invoking the given callback for
// each unmarshaled entry with the key, the transaction ID and the decoded
// entry.
func (sc *SequenceCache) Iterate(e engine.Engine, f func([]byte, []byte, roachpb.SequenceCacheEntry)) {
    _, _ = engine.MVCCIterate(e, sc.min, sc.max, roachpb.ZeroTimestamp,
        true /* consistent */, nil /* txn */, false, /* !reverse */
        func(kv roachpb.KeyValue) (bool, error) {
            var entry roachpb.SequenceCacheEntry
            id, _, _, err := decodeSequenceCacheKey(kv.Key, nil)
            if err != nil {
                panic(err) // TODO(tschottdorf): ReplicaCorruptionError
            }
            if err := kv.Value.GetProto(&entry); err != nil {
                panic(err) // TODO(tschottdorf): ReplicaCorruptionError
            }
            f(kv.Key, id, entry)
            return false, nil
        })
}
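Because the callback passed to MVCCIterate here always returns false, the scan never stops early and Iterate visits the whole cache; a caller can therefore snapshot every entry in one pass. A sketch, with collectSequenceEntries as a hypothetical helper:

// collectSequenceEntries gathers every entry in the sequence cache into a
// slice, using the Iterate method defined above.
func collectSequenceEntries(sc *SequenceCache, e engine.Engine) []roachpb.SequenceCacheEntry {
    var entries []roachpb.SequenceCacheEntry
    sc.Iterate(e, func(key, id []byte, entry roachpb.SequenceCacheEntry) {
        entries = append(entries, entry)
    })
    return entries
}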
Example 4: Iterate
// Iterate walks through the abort cache, invoking the given callback for
// each unmarshaled entry with the key, the transaction ID and the decoded
// entry.
func (sc *AbortCache) Iterate(
    e engine.Engine, f func([]byte, *uuid.UUID, roachpb.AbortCacheEntry),
) {
    _, _ = engine.MVCCIterate(e, sc.min(), sc.max(), roachpb.ZeroTimestamp,
        true /* consistent */, nil /* txn */, false, /* !reverse */
        func(kv roachpb.KeyValue) (bool, error) {
            var entry roachpb.AbortCacheEntry
            txnID, err := keys.DecodeAbortCacheKey(kv.Key, nil)
            if err != nil {
                panic(err) // TODO(tschottdorf): ReplicaCorruptionError
            }
            if err := kv.Value.GetProto(&entry); err != nil {
                panic(err) // TODO(tschottdorf): ReplicaCorruptionError
            }
            f(kv.Key, txnID, entry)
            return false, nil
        })
}
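The abort cache variant differs from Example 3 only in the decoded key type (*uuid.UUID rather than raw bytes). A hypothetical caller indexing entries by transaction ID might look like this (assuming the UUID type exposes a String method, as the cockroach uuid package does):

// entriesByTxn is a hypothetical caller that groups abort cache entries by
// transaction ID, keyed by the UUID's string form.
func entriesByTxn(sc *AbortCache, e engine.Engine) map[string]roachpb.AbortCacheEntry {
    m := make(map[string]roachpb.AbortCacheEntry)
    sc.Iterate(e, func(key []byte, txnID *uuid.UUID, entry roachpb.AbortCacheEntry) {
        m[txnID.String()] = entry
    })
    return m
}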
Example 5: TestStoreRangeMergeMetadataCleanup
// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
    defer leaktest.AfterTest(t)
    store, stopper := createTestStore(t)
    defer stopper.Stop()

    scan := func(f func(roachpb.KeyValue) (bool, error)) {
        if _, err := engine.MVCCIterate(store.Engine(), roachpb.KeyMin, roachpb.KeyMax, roachpb.ZeroTimestamp, true, nil, false, f); err != nil {
            t.Fatal(err)
        }
    }
    content := roachpb.Key("testing!")

    // Write some values left of the proposed split key.
    pArgs := putArgs([]byte("aaa"), content)
    if _, err := client.SendWrapped(rg1(store), nil, &pArgs); err != nil {
        t.Fatal(err)
    }

    // Collect all the keys.
    preKeys := make(map[string]struct{})
    scan(func(kv roachpb.KeyValue) (bool, error) {
        preKeys[string(kv.Key)] = struct{}{}
        return false, nil
    })

    // Split the range.
    _, bDesc, err := createSplitRanges(store)
    if err != nil {
        t.Fatal(err)
    }

    // Write some values right of the split key.
    pArgs = putArgs([]byte("ccc"), content)
    if _, err := client.SendWrappedWith(rg1(store), nil, roachpb.Header{
        RangeID: bDesc.RangeID,
    }, &pArgs); err != nil {
        t.Fatal(err)
    }

    // Merge the b range back into the a range.
    args := adminMergeArgs(roachpb.KeyMin)
    if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
        t.Fatal(err)
    }

    // Collect all the keys again.
    postKeys := make(map[string]struct{})
    scan(func(kv roachpb.KeyValue) (bool, error) {
        postKeys[string(kv.Key)] = struct{}{}
        return false, nil
    })

    // Compute the new keys.
    for k := range preKeys {
        delete(postKeys, k)
    }

    // Keep only the subsumed range's local keys.
    localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(bDesc.RangeID))
    for k := range postKeys {
        if !strings.HasPrefix(k, localRangeKeyPrefix) {
            delete(postKeys, k)
        }
    }

    if numKeys := len(postKeys); numKeys > 0 {
        var buf bytes.Buffer
        fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
        for k := range postKeys {
            fmt.Fprintf(&buf, "%q\n", k)
        }
        t.Fatal(buf.String())
    }
}
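The heart of this test is a set difference followed by a prefix filter over the keys collected via MVCCIterate. Factored out as a hypothetical helper (not part of the test file), the computation is:

// newKeysWithPrefix restates the test's inline computation: the keys present
// in post but not in pre that carry the given prefix.
func newKeysWithPrefix(pre, post map[string]struct{}, prefix string) map[string]struct{} {
    out := make(map[string]struct{})
    for k := range post {
        if _, ok := pre[k]; !ok && strings.HasPrefix(k, prefix) {
            out[k] = struct{}{}
        }
    }
    return out
}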
Example 6: entries
func (r *Replica) entries(e engine.Engine, lo, hi, maxBytes uint64) ([]raftpb.Entry, error) {
    if lo > hi {
        return nil, util.Errorf("lo:%d is greater than hi:%d", lo, hi)
    }
    // Scan over the log to find the requested entries in the range [lo, hi),
    // stopping once we have enough.
    var ents []raftpb.Entry
    size := uint64(0)
    var ent raftpb.Entry
    expectedIndex := lo
    exceededMaxBytes := false
    scanFunc := func(kv roachpb.KeyValue) (bool, error) {
        if err := kv.Value.GetProto(&ent); err != nil {
            return false, err
        }
        // Exit early if we have any gaps or it has been compacted.
        if ent.Index != expectedIndex {
            return true, nil
        }
        expectedIndex++
        size += uint64(ent.Size())
        ents = append(ents, ent)
        exceededMaxBytes = maxBytes > 0 && size > maxBytes
        return exceededMaxBytes, nil
    }

    rangeID := r.RangeID
    _, err := engine.MVCCIterate(e,
        keys.RaftLogKey(rangeID, lo),
        keys.RaftLogKey(rangeID, hi),
        roachpb.ZeroTimestamp,
        true /* consistent */, nil /* txn */, false /* !reverse */, scanFunc)
    if err != nil {
        return nil, err
    }

    // Did the correct number of results come back? If so, we're all good.
    if len(ents) == int(hi)-int(lo) {
        return ents, nil
    }

    // Did we hit the size limit? If so, return what we have.
    if exceededMaxBytes {
        return ents, nil
    }

    // Did we get any results at all? If so, something went wrong partway.
    if len(ents) > 0 {
        // Was lo already truncated?
        if ents[0].Index > lo {
            return nil, raft.ErrCompacted
        }
        // Was the missing index after the last index?
        lastIndex, err := r.LastIndex()
        if err != nil {
            return nil, err
        }
        if lastIndex <= expectedIndex {
            return nil, raft.ErrUnavailable
        }
        // We have a gap in the record; return a nasty error.
        return nil, util.Errorf("there is a gap in the index record between lo:%d and hi:%d at index:%d", lo, hi, expectedIndex)
    }

    // No results at all: was it due to unavailability or truncation?
    ts, err := r.raftTruncatedStateLocked()
    if err != nil {
        return nil, err
    }
    if ts.Index >= lo {
        // The requested lo index has already been truncated.
        return nil, raft.ErrCompacted
    }
    // The requested lo index does not yet exist.
    return nil, raft.ErrUnavailable
}
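The post-scan error handling encodes a small decision procedure. Restated as a hypothetical pure function over values the method computes above (ents, expectedIndex, exceededMaxBytes, the last index, and the truncated-state index), with the same cases in the same order:

// classify is an illustrative restatement of the post-scan branching in
// entries: a complete or size-limited result is fine; otherwise the position
// of the gap decides between ErrCompacted, ErrUnavailable, and a
// corruption-style gap error.
func classify(ents []raftpb.Entry, lo, hi, expectedIndex, lastIndex, truncIndex uint64, exceededMaxBytes bool) error {
    switch {
    case uint64(len(ents)) == hi-lo || exceededMaxBytes:
        return nil // complete result, or size limit hit: return what we have
    case len(ents) > 0 && ents[0].Index > lo:
        return raft.ErrCompacted // lo itself was already truncated
    case len(ents) > 0 && lastIndex <= expectedIndex:
        return raft.ErrUnavailable // the missing indexes lie past the last index
    case len(ents) > 0:
        return util.Errorf("gap at index %d in [%d, %d)", expectedIndex, lo, hi)
    case truncIndex >= lo:
        return raft.ErrCompacted // the whole request is below the truncation point
    default:
        return raft.ErrUnavailable // the requested indexes do not exist yet
    }
}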
Example 7: TestGCQueueTransactionTable
//... (beginning of the function omitted here) ...
            if testCases[id].failResolve {
                return roachpb.NewErrorWithTxn(util.Errorf("boom"), filterArgs.Hdr.Txn)
            }
        }
        return nil
    }
    tc.StartWithStoreContext(t, tsc)
    defer tc.Stop()
    tc.manualClock.Set(int64(now))

    outsideKey := tc.rng.Desc().EndKey.Next().AsRawKey()
    testIntents := []roachpb.Span{{Key: roachpb.Key("intent")}}

    txns := map[string]roachpb.Transaction{}
    for strKey, test := range testCases {
        baseKey := roachpb.Key(strKey)
        txnClock := hlc.NewClock(hlc.NewManualClock(int64(test.orig)).UnixNano)
        txn := newTransaction("txn1", baseKey, 1, enginepb.SERIALIZABLE, txnClock)
        txn.Status = test.status
        txn.Intents = testIntents
        if test.hb > 0 {
            txn.LastHeartbeat = &hlc.Timestamp{WallTime: int64(test.hb)}
        }
        // Set a high Timestamp to make sure it does not matter. Only
        // OrigTimestamp (and heartbeat) are used for GC decisions.
        txn.Timestamp.Forward(hlc.MaxTimestamp)
        txns[strKey] = *txn
        for _, addrKey := range []roachpb.Key{baseKey, outsideKey} {
            key := keys.TransactionKey(addrKey, txn.ID)
            if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, key, hlc.ZeroTimestamp, nil, txn); err != nil {
                t.Fatal(err)
            }
        }
        entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.LastActive()}
        if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
            t.Fatal(err)
        }
    }

    // Run GC.
    gcQ := newGCQueue(tc.gossip)
    cfg, ok := tc.gossip.GetSystemConfig()
    if !ok {
        t.Fatal("config not set")
    }
    if err := gcQ.process(tc.clock.Now(), tc.rng, cfg); err != nil {
        t.Fatal(err)
    }

    util.SucceedsSoon(t, func() error {
        for strKey, sp := range testCases {
            txn := &roachpb.Transaction{}
            key := keys.TransactionKey(roachpb.Key(strKey), txns[strKey].ID)
            ok, err := engine.MVCCGetProto(context.Background(), tc.engine, key, hlc.ZeroTimestamp, true, nil, txn)
            if err != nil {
                return err
            }
            if expGC := (sp.newStatus == -1); expGC {
                if expGC != !ok {
                    return fmt.Errorf("%s: expected gc: %t, but found %s\n%s", strKey, expGC, txn, roachpb.Key(strKey))
                }
            } else if sp.newStatus != txn.Status {
                return fmt.Errorf("%s: expected status %s, but found %s", strKey, sp.newStatus, txn.Status)
            }
            var expIntents []roachpb.Span
            if sp.expResolve {
                expIntents = testIntents
            }
            if !reflect.DeepEqual(resolved[strKey], expIntents) {
                return fmt.Errorf("%s: unexpected intent resolutions:\nexpected: %s\nobserved: %s",
                    strKey, expIntents, resolved[strKey])
            }
            entry := &roachpb.AbortCacheEntry{}
            abortExists, err := tc.rng.abortCache.Get(context.Background(), tc.store.Engine(), txns[strKey].ID, entry)
            if err != nil {
                t.Fatal(err)
            }
            if (abortExists == false) != sp.expAbortGC {
                return fmt.Errorf("%s: expected abort cache gc: %t, found %+v", strKey, sp.expAbortGC, entry)
            }
        }
        return nil
    })

    outsideTxnPrefix := keys.TransactionKey(outsideKey, uuid.EmptyUUID)
    outsideTxnPrefixEnd := keys.TransactionKey(outsideKey.Next(), uuid.EmptyUUID)
    var count int
    if _, err := engine.MVCCIterate(context.Background(), tc.store.Engine(), outsideTxnPrefix, outsideTxnPrefixEnd, hlc.ZeroTimestamp,
        true, nil, false, func(roachpb.KeyValue) (bool, error) {
            count++
            return false, nil
        }); err != nil {
        t.Fatal(err)
    }
    if exp := len(testCases); exp != count {
        t.Fatalf("expected the %d external transaction entries to remain untouched, "+
            "but only %d are left", exp, count)
    }
}
Example 8: processTransactionTable
// processTransactionTable scans the transaction table and updates txnMap with
// those transactions which are old and either PENDING or with intents
// registered. In the first case we want to push the transaction so that it is
// aborted, and in the second case we may have to resolve the intents
// successfully before GCing the entry. The transaction records which can be
// gc'ed are returned separately and are not added to txnMap nor intentSpanMap.
func (gcq *gcQueue) processTransactionTable(r *Replica, txnMap map[uuid.UUID]*roachpb.Transaction, cutoff roachpb.Timestamp) ([]roachpb.GCRequest_GCKey, error) {
    snap := r.store.Engine().NewSnapshot()
    defer snap.Close()

    var numResolveAttempts, numQueuedPushes int
    var gcKeys []roachpb.GCRequest_GCKey
    defer func() {
        gcq.eventLog.Infof(true, "attempted to resolve %d intents of %d gc'able transactions; queued %d txns for push", numResolveAttempts, len(gcKeys), numQueuedPushes)
    }()

    handleOne := func(kv roachpb.KeyValue) error {
        var txn roachpb.Transaction
        if err := kv.Value.GetProto(&txn); err != nil {
            return err
        }
        ts := txn.Timestamp
        if heartbeatTS := txn.LastHeartbeat; heartbeatTS != nil {
            ts.Forward(*heartbeatTS)
        }
        if !ts.Less(cutoff) {
            return nil
        }
        txnID := *txn.ID

        // The transaction record should be considered for removal.
        switch txn.Status {
        case roachpb.PENDING:
            // Marked as running, so we need to push it to abort it but won't
            // try to GC it in this cycle (for convenience).
            // TODO(tschottdorf): refactor so that we can GC PENDING entries
            // in the same cycle, but keeping the calls to pushTxn in a central
            // location (keeping it easy to batch them up in the future).
            numQueuedPushes++
            txnMap[txnID] = &txn
            return nil
        case roachpb.ABORTED:
            // If we remove this transaction, it effectively still counts as
            // ABORTED (by design). So this can be GC'ed even if we can't
            // resolve the intents.
            // Note: Most aborted transactions weren't aborted by their client,
            // but instead by the coordinator - those will not have any intents
            // persisted, though they still might exist in the system.
            numResolveAttempts += len(txn.Intents)
            if err := r.store.intentResolver.resolveIntents(r.context(), r,
                roachpb.AsIntents(txn.Intents, &txn), true /* wait */, false /* !poison */); err != nil {
                log.Warningf("failed to resolve intents of aborted txn on gc: %s", err)
            }
        case roachpb.COMMITTED:
            // It's committed, so it doesn't need a push but we can only
            // GC it after its intents are resolved.
            numResolveAttempts += len(txn.Intents)
            if err := r.store.intentResolver.resolveIntents(r.context(), r,
                roachpb.AsIntents(txn.Intents, &txn), true /* wait */, false /* !poison */); err != nil {
                log.Warningf("unable to resolve intents of committed txn on gc: %s", err)
                // Returning the error here would abort the whole GC run, and
                // we don't want that. Instead, we simply don't GC this entry.
                return nil
            }
        default:
            panic(fmt.Sprintf("invalid transaction state: %s", txn))
        }
        gcKeys = append(gcKeys, roachpb.GCRequest_GCKey{Key: kv.Key}) // zero timestamp
        return nil
    }

    startKey := keys.TransactionKey(roachpb.KeyMin, uuid.EmptyUUID)
    endKey := keys.TransactionKey(roachpb.KeyMax, uuid.EmptyUUID)
    _, err := engine.MVCCIterate(snap, startKey, endKey, roachpb.ZeroTimestamp, true /* consistent */, nil /* txn */, false /* !reverse */, func(kv roachpb.KeyValue) (bool, error) {
        return false, handleOne(kv)
    })
    return gcKeys, err
}
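The per-status handling reduces to a three-way GC policy: PENDING records are queued for a push, ABORTED records are GC'ed even if their intents cannot be resolved, and COMMITTED records are GC'ed only once intent resolution succeeds. A hypothetical distillation of that policy (gcAction and actionFor are illustrative, not part of the package):

// gcAction and actionFor sketch the policy encoded in handleOne above.
type gcAction int

const (
    queueForPush gcAction = iota // PENDING: push to abort, GC in a later cycle
    gcAlways                     // ABORTED: GC even if intents cannot be resolved
    gcIfResolved                 // COMMITTED: GC only after intents resolve
)

func actionFor(status roachpb.TransactionStatus) gcAction {
    switch status {
    case roachpb.PENDING:
        return queueForPush
    case roachpb.ABORTED:
        return gcAlways
    case roachpb.COMMITTED:
        return gcIfResolved
    default:
        panic(fmt.Sprintf("invalid transaction state: %s", status))
    }
}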
Example 9: runDebugCheckStoreCmd
func runDebugCheckStoreCmd(cmd *cobra.Command, args []string) error {
    stopper := stop.NewStopper()
    defer stopper.Stop()

    if len(args) != 1 {
        return errors.New("required arguments: dir")
    }

    db, err := openStore(cmd, args[0], stopper)
    if err != nil {
        return err
    }

    // Iterate over the entire range-id-local space.
    start := roachpb.Key(keys.LocalRangeIDPrefix)
    end := start.PrefixEnd()

    replicaInfo := map[roachpb.RangeID]*replicaCheckInfo{}
    getReplicaInfo := func(rangeID roachpb.RangeID) *replicaCheckInfo {
        if info, ok := replicaInfo[rangeID]; ok {
            return info
        }
        replicaInfo[rangeID] = &replicaCheckInfo{}
        return replicaInfo[rangeID]
    }

    if _, err := engine.MVCCIterate(context.Background(), db, start, end, hlc.MaxTimestamp,
        false /* !consistent */, nil /* txn */, false /* !reverse */,
        func(kv roachpb.KeyValue) (bool, error) {
            rangeID, _, suffix, detail, err := keys.DecodeRangeIDKey(kv.Key)
            if err != nil {
                return false, err
            }
            switch {
            case bytes.Equal(suffix, keys.LocalRaftTruncatedStateSuffix):
                var trunc roachpb.RaftTruncatedState
                if err := kv.Value.GetProto(&trunc); err != nil {
                    return false, err
                }
                getReplicaInfo(rangeID).truncatedIndex = trunc.Index
            case bytes.Equal(suffix, keys.LocalRaftAppliedIndexSuffix):
                idx, err := kv.Value.GetInt()
                if err != nil {
                    return false, err
                }
                getReplicaInfo(rangeID).appliedIndex = uint64(idx)
            case bytes.Equal(suffix, keys.LocalRaftLogSuffix):
                _, index, err := encoding.DecodeUint64Ascending(detail)
                if err != nil {
                    return false, err
                }
                ri := getReplicaInfo(rangeID)
                if ri.firstIndex == 0 {
                    ri.firstIndex = index
                    ri.lastIndex = index
                } else {
                    if index != ri.lastIndex+1 {
                        fmt.Printf("range %s: log index anomaly: %v followed by %v\n",
                            rangeID, ri.lastIndex, index)
                    }
                    ri.lastIndex = index
                }
            }
            return false, nil
        }); err != nil {
        return err
    }

    for rangeID, info := range replicaInfo {
        if info.truncatedIndex != info.firstIndex-1 {
            fmt.Printf("range %s: truncated index %v should equal first index %v - 1\n",
                rangeID, info.truncatedIndex, info.firstIndex)
        }
        if info.appliedIndex < info.firstIndex || info.appliedIndex > info.lastIndex {
            fmt.Printf("range %s: applied index %v should be between first index %v and last index %v\n",
                rangeID, info.appliedIndex, info.firstIndex, info.lastIndex)
        }
    }
    return nil
}
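The loop after the scan checks two invariants per replica: the truncated index must sit immediately before the first log index, and the applied index must lie within the log's bounds. A hypothetical helper returning these as errors instead of printing them:

// checkReplicaInvariants restates the two consistency checks performed above.
func checkReplicaInvariants(info *replicaCheckInfo) error {
    if info.truncatedIndex != info.firstIndex-1 {
        return fmt.Errorf("truncated index %d should equal first index %d - 1",
            info.truncatedIndex, info.firstIndex)
    }
    if info.appliedIndex < info.firstIndex || info.appliedIndex > info.lastIndex {
        return fmt.Errorf("applied index %d should be between first index %d and last index %d",
            info.appliedIndex, info.firstIndex, info.lastIndex)
    }
    return nil
}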
Example 10: runDebugGCCmd
func runDebugGCCmd(cmd *cobra.Command, args []string) error {
    stopper := stop.NewStopper()
    defer stopper.Stop()

    // Accept "dir" plus an optional range_id (the original check of
    // len(args) != 1 made the two-argument branch below unreachable).
    if len(args) < 1 || len(args) > 2 {
        return errors.New("required arguments: dir [range_id]")
    }

    var rangeID roachpb.RangeID
    if len(args) == 2 {
        var err error
        if rangeID, err = parseRangeID(args[1]); err != nil {
            return err
        }
    }

    db, err := openStore(cmd, args[0], stopper)
    if err != nil {
        return err
    }

    start := keys.RangeDescriptorKey(roachpb.RKeyMin)
    end := keys.RangeDescriptorKey(roachpb.RKeyMax)

    var descs []roachpb.RangeDescriptor
    if _, err := engine.MVCCIterate(context.Background(), db, start, end, hlc.MaxTimestamp,
        false /* !consistent */, nil /* txn */, false /* !reverse */,
        func(kv roachpb.KeyValue) (bool, error) {
            var desc roachpb.RangeDescriptor
            _, suffix, _, err := keys.DecodeRangeKey(kv.Key)
            if err != nil {
                return false, err
            }
            if !bytes.Equal(suffix, keys.LocalRangeDescriptorSuffix) {
                return false, nil
            }
            if err := kv.Value.GetProto(&desc); err != nil {
                return false, err
            }
            if desc.RangeID == rangeID || rangeID == 0 {
                descs = append(descs, desc)
            }
            // Stop scanning as soon as the requested range has been found.
            return desc.RangeID == rangeID, nil
        }); err != nil {
        return err
    }

    if len(descs) == 0 {
        return fmt.Errorf("no range matching the criteria found")
    }

    for _, desc := range descs {
        snap := db.NewSnapshot()
        defer snap.Close()
        _, info, err := storage.RunGC(context.Background(), &desc, snap, hlc.Timestamp{WallTime: timeutil.Now().UnixNano()},
            config.GCPolicy{TTLSeconds: 24 * 60 * 60 /* 1 day */}, func(_ hlc.Timestamp, _ *roachpb.Transaction, _ roachpb.PushTxnType) {
            }, func(_ []roachpb.Intent, _, _ bool) error { return nil })
        if err != nil {
            return err
        }
        fmt.Printf("RangeID: %d [%s, %s):\n", desc.RangeID, desc.StartKey, desc.EndKey)
        _, _ = pretty.Println(info)
    }
    return nil
}
Example 11: processTransactionTable
// processTransactionTable scans the transaction table and updates txnMap with
// those transactions which are old and either PENDING or with intents
// registered. In the first case we want to push the transaction so that it is
// aborted, and in the second case we may have to resolve the intents
// successfully before GCing the entry. The transaction records which can be
// gc'ed are returned separately and are not added to txnMap nor intentSpanMap.
func processTransactionTable(
    ctx context.Context,
    snap engine.Engine,
    desc *roachpb.RangeDescriptor,
    txnMap map[uuid.UUID]*roachpb.Transaction,
    cutoff roachpb.Timestamp,
    infoMu *lockableGCInfo,
    resolveIntents resolveFunc,
) ([]roachpb.GCRequest_GCKey, error) {
    infoMu.Lock()
    defer infoMu.Unlock()

    var gcKeys []roachpb.GCRequest_GCKey
    handleOne := func(kv roachpb.KeyValue) error {
        var txn roachpb.Transaction
        if err := kv.Value.GetProto(&txn); err != nil {
            return err
        }
        infoMu.TransactionSpanTotal++
        if !txn.LastActive().Less(cutoff) {
            return nil
        }
        txnID := *txn.ID

        // The transaction record should be considered for removal.
        switch txn.Status {
        case roachpb.PENDING:
            // Marked as running, so we need to push it to abort it but won't
            // try to GC it in this cycle (for convenience).
            // TODO(tschottdorf): refactor so that we can GC PENDING entries
            // in the same cycle, but keeping the calls to pushTxn in a central
            // location (keeping it easy to batch them up in the future).
            infoMu.TransactionSpanGCPending++
            txnMap[txnID] = &txn
            return nil
        case roachpb.ABORTED:
            // If we remove this transaction, it effectively still counts as
            // ABORTED (by design). So this can be GC'ed even if we can't
            // resolve the intents.
            // Note: Most aborted transactions weren't aborted by their client,
            // but instead by the coordinator - those will not have any intents
            // persisted, though they still might exist in the system.
            infoMu.TransactionSpanGCAborted++
            func() {
                infoMu.Unlock() // intentional
                defer infoMu.Lock()
                if err := resolveIntents(roachpb.AsIntents(txn.Intents, &txn),
                    true /* wait */, false /* !poison */); err != nil {
                    log.Warningf("failed to resolve intents of aborted txn on gc: %s", err)
                }
            }()
        case roachpb.COMMITTED:
            // It's committed, so it doesn't need a push but we can only
            // GC it after its intents are resolved.
            if err := func() error {
                infoMu.Unlock() // intentional
                defer infoMu.Lock()
                return resolveIntents(roachpb.AsIntents(txn.Intents, &txn), true /* wait */, false /* !poison */)
            }(); err != nil {
                log.Warningf("unable to resolve intents of committed txn on gc: %s", err)
                // Returning the error here would abort the whole GC run, and
                // we don't want that. Instead, we simply don't GC this entry.
                return nil
            }
            infoMu.TransactionSpanGCCommitted++
        default:
            panic(fmt.Sprintf("invalid transaction state: %s", txn))
        }
        gcKeys = append(gcKeys, roachpb.GCRequest_GCKey{Key: kv.Key}) // zero timestamp
        return nil
    }

    startKey := keys.TransactionKey(desc.StartKey.AsRawKey(), uuid.EmptyUUID)
    endKey := keys.TransactionKey(desc.EndKey.AsRawKey(), uuid.EmptyUUID)
    _, err := engine.MVCCIterate(ctx, snap, startKey, endKey,
        roachpb.ZeroTimestamp, true /* consistent */, nil /* txn */,
        false /* !reverse */, func(kv roachpb.KeyValue) (bool, error) {
            return false, handleOne(kv)
        })
    return gcKeys, err
}
Example 12: TestGCQueueTransactionTable
//... (beginning of the function omitted here) ...
                Key:    resArgs.Key,
                EndKey: resArgs.EndKey,
            })
            // We've special cased one test case. Note that the intent is still
            // counted in `resolved`.
            if testCases[id].failResolve {
                return util.Errorf("boom")
            }
        }
        return nil
    }
    tc.StartWithStoreContext(t, tsc)
    defer tc.Stop()
    tc.manualClock.Set(int64(now))

    outsideKey := tc.rng.Desc().EndKey.Next().AsRawKey()
    testIntents := []roachpb.Span{{Key: roachpb.Key("intent")}}

    txns := map[string]roachpb.Transaction{}
    var epo uint32
    for strKey, test := range testCases {
        epo++
        baseKey := roachpb.Key(strKey)
        txnClock := hlc.NewClock(hlc.NewManualClock(int64(test.ts)).UnixNano)
        txn := newTransaction("txn1", baseKey, 1, roachpb.SERIALIZABLE, txnClock)
        txn.Status = test.status
        txn.Intents = testIntents
        txn.LastHeartbeat = &roachpb.Timestamp{WallTime: int64(test.heartbeatTS)}
        txns[strKey] = *txn
        for _, addrKey := range []roachpb.Key{baseKey, outsideKey} {
            key := keys.TransactionKey(addrKey, txn.ID)
            if err := engine.MVCCPutProto(tc.engine, nil, key, roachpb.ZeroTimestamp, nil, txn); err != nil {
                t.Fatal(err)
            }
        }
        seqTS := txn.Timestamp
        seqTS.Forward(*txn.LastHeartbeat)
        if err := tc.rng.sequence.Put(tc.engine, nil, txn.ID, epo, 2*epo, txn.Key, seqTS, nil /* err */); err != nil {
            t.Fatal(err)
        }
    }

    // Run GC.
    gcQ := newGCQueue(tc.gossip)
    cfg, ok := tc.gossip.GetSystemConfig()
    if !ok {
        t.Fatal("config not set")
    }
    if err := gcQ.process(tc.clock.Now(), tc.rng, cfg); err != nil {
        t.Fatal(err)
    }

    util.SucceedsSoon(t, func() error {
        for strKey, sp := range testCases {
            txn := &roachpb.Transaction{}
            key := keys.TransactionKey(roachpb.Key(strKey), txns[strKey].ID)
            ok, err := engine.MVCCGetProto(tc.engine, key, roachpb.ZeroTimestamp, true, nil, txn)
            if err != nil {
                return err
            }
            if expGC := (sp.newStatus == -1); expGC {
                if expGC != !ok {
                    return fmt.Errorf("%s: expected gc: %t, but found %s\n%s", strKey, expGC, txn, roachpb.Key(strKey))
                }
            } else if sp.newStatus != txn.Status {
                return fmt.Errorf("%s: expected status %s, but found %s", strKey, sp.newStatus, txn.Status)
            }
            var expIntents []roachpb.Span
            if sp.expResolve {
                expIntents = testIntents
            }
            if !reflect.DeepEqual(resolved[strKey], expIntents) {
                return fmt.Errorf("%s: unexpected intent resolutions:\nexpected: %s\nobserved: %s",
                    strKey, expIntents, resolved[strKey])
            }
            if kvs, err := tc.rng.sequence.GetAllTransactionID(tc.store.Engine(), txns[strKey].ID); err != nil {
                t.Fatal(err)
            } else if (len(kvs) != 0) == sp.expSeqGC {
                return fmt.Errorf("%s: expected sequence cache gc: %t, found %+v", strKey, sp.expSeqGC, kvs)
            }
        }
        return nil
    })

    outsideTxnPrefix := keys.TransactionKey(outsideKey, uuid.EmptyUUID)
    outsideTxnPrefixEnd := keys.TransactionKey(outsideKey.Next(), uuid.EmptyUUID)
    var count int
    if _, err := engine.MVCCIterate(tc.store.Engine(), outsideTxnPrefix, outsideTxnPrefixEnd, roachpb.ZeroTimestamp,
        true, nil, false, func(roachpb.KeyValue) (bool, error) {
            count++
            return false, nil
        }); err != nil {
        t.Fatal(err)
    }
    if exp := len(testCases); exp != count {
        t.Fatalf("expected the %d external transaction entries to remain untouched, "+
            "but only %d are left", exp, count)
    }
}