This page collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/pkg/storage/engine.MVCCIterate. If you are unsure what MVCCIterate does or how to call it, the curated code examples below should help.
The section shows 7 code examples of the MVCCIterate function, all taken from the CockroachDB code base and sorted by popularity by default.
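Before the examples, here is the general shape of an MVCCIterate call: it scans a key range in an engine.Reader at a given timestamp and invokes a callback for every roachpb.KeyValue, stopping early when the callback returns true or an error. The helper below is a minimal sketch written for this page, not code from any example; the name collectKeys and its parameters are invented for illustration, and the first return value of MVCCIterate is discarded, just as every example on this page discards it.

// Minimal sketch (assumed imports: context plus the roachpb, storage/engine,
// and util/hlc packages from the paths shown above). collectKeys scans
// [start, end) at the zero timestamp and returns every key it sees.
func collectKeys(
	ctx context.Context, e engine.Reader, start, end roachpb.Key,
) ([]roachpb.Key, error) {
	var collected []roachpb.Key
	_, err := engine.MVCCIterate(ctx, e, start, end, hlc.ZeroTimestamp,
		true /* consistent */, nil /* txn */, false, /* !reverse */
		func(kv roachpb.KeyValue) (bool, error) {
			collected = append(collected, kv.Key)
			return false, nil // false: continue iterating
		})
	return collected, err
}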
Example 1: Iterate
// Iterate walks through the abort cache, invoking the given callback for
// each unmarshaled entry with the key and the decoded entry.
// TODO(tschottdorf): should not use a pointer to UUID.
func (sc *AbortCache) Iterate(
ctx context.Context, e engine.Reader, f func([]byte, roachpb.AbortCacheEntry),
) {
_, _ = engine.MVCCIterate(ctx, e, sc.min(), sc.max(), hlc.ZeroTimestamp,
true /* consistent */, nil /* txn */, false, /* !reverse */
func(kv roachpb.KeyValue) (bool, error) {
var entry roachpb.AbortCacheEntry
if _, err := keys.DecodeAbortCacheKey(kv.Key, nil); err != nil {
panic(err) // TODO(tschottdorf): ReplicaCorruptionError
}
if err := kv.Value.GetProto(&entry); err != nil {
panic(err) // TODO(tschottdorf): ReplicaCorruptionError
}
f(kv.Key, entry)
return false, nil
})
}
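To make Example 1 concrete, the sketch below shows how a caller might drive AbortCache.Iterate; it simply counts the entries and remembers their keys. The variables ctx, sc (the *AbortCache), and e (the engine.Reader) are assumed to be in scope and are not part of the example above.

// Illustrative caller, not taken from the CockroachDB sources.
var count int
var entryKeys []roachpb.Key
sc.Iterate(ctx, e, func(key []byte, entry roachpb.AbortCacheEntry) {
	count++
	entryKeys = append(entryKeys, roachpb.Key(key))
})
// count now holds the number of abort cache entries in [sc.min(), sc.max()).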
Example 2: iterateEntries
func iterateEntries(
ctx context.Context,
e engine.Reader,
rangeID roachpb.RangeID,
lo, hi uint64,
scanFunc func(roachpb.KeyValue) (bool, error),
) error {
_, err := engine.MVCCIterate(
ctx, e,
keys.RaftLogKey(rangeID, lo),
keys.RaftLogKey(rangeID, hi),
hlc.ZeroTimestamp,
true, /* consistent */
nil, /* txn */
false, /* !reverse */
scanFunc,
)
return err
}
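A possible caller of Example 2 is sketched below. The hypothetical helper countEntries only counts the raft log entries between two indexes, so it does not have to assume anything about how the log values are encoded; its name and signature are invented for illustration.

// Illustrative helper, not taken from the CockroachDB sources: countEntries
// returns how many raft log entries exist in [lo, hi) for the given range.
func countEntries(
	ctx context.Context, e engine.Reader, rangeID roachpb.RangeID, lo, hi uint64,
) (int, error) {
	var n int
	err := iterateEntries(ctx, e, rangeID, lo, hi,
		func(kv roachpb.KeyValue) (bool, error) {
			n++
			return false, nil // keep scanning until hi is reached
		})
	return n, err
}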
Example 3: TestGCQueueTransactionTable
//......... part of the code is omitted here .........
for strKey, test := range testCases {
baseKey := roachpb.Key(strKey)
txnClock := hlc.NewClock(hlc.NewManualClock(int64(test.orig)).UnixNano)
txn := newTransaction("txn1", baseKey, 1, enginepb.SERIALIZABLE, txnClock)
txn.Status = test.status
txn.Intents = testIntents
if test.hb > 0 {
txn.LastHeartbeat = &hlc.Timestamp{WallTime: int64(test.hb)}
}
// Set a high Timestamp to make sure it does not matter. Only
// OrigTimestamp (and heartbeat) are used for GC decisions.
txn.Timestamp.Forward(hlc.MaxTimestamp)
txns[strKey] = *txn
for _, addrKey := range []roachpb.Key{baseKey, outsideKey} {
key := keys.TransactionKey(addrKey, txn.ID)
if err := engine.MVCCPutProto(context.Background(), tc.engine, nil, key, hlc.ZeroTimestamp, nil, txn); err != nil {
t.Fatal(err)
}
}
entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.LastActive()}
if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil {
t.Fatal(err)
}
}
// Run GC.
gcQ := newGCQueue(tc.store, tc.gossip)
cfg, ok := tc.gossip.GetSystemConfig()
if !ok {
t.Fatal("config not set")
}
if err := gcQ.process(context.Background(), tc.clock.Now(), tc.rng, cfg); err != nil {
t.Fatal(err)
}
util.SucceedsSoon(t, func() error {
for strKey, sp := range testCases {
txn := &roachpb.Transaction{}
key := keys.TransactionKey(roachpb.Key(strKey), txns[strKey].ID)
ok, err := engine.MVCCGetProto(context.Background(), tc.engine, key, hlc.ZeroTimestamp, true, nil, txn)
if err != nil {
return err
}
if expGC := (sp.newStatus == -1); expGC {
if expGC != !ok {
return fmt.Errorf("%s: expected gc: %t, but found %s\n%s", strKey, expGC, txn, roachpb.Key(strKey))
}
} else if sp.newStatus != txn.Status {
return fmt.Errorf("%s: expected status %s, but found %s", strKey, sp.newStatus, txn.Status)
}
var expIntents []roachpb.Span
if sp.expResolve {
expIntents = testIntents
}
if !reflect.DeepEqual(resolved[strKey], expIntents) {
return fmt.Errorf("%s: unexpected intent resolutions:\nexpected: %s\nobserved: %s",
strKey, expIntents, resolved[strKey])
}
entry := &roachpb.AbortCacheEntry{}
abortExists, err := tc.rng.abortCache.Get(context.Background(), tc.store.Engine(), txns[strKey].ID, entry)
if err != nil {
t.Fatal(err)
}
if abortExists == sp.expAbortGC {
return fmt.Errorf("%s: expected abort cache gc: %t, found %+v", strKey, sp.expAbortGC, entry)
}
}
return nil
})
outsideTxnPrefix := keys.TransactionKey(outsideKey, uuid.EmptyUUID)
outsideTxnPrefixEnd := keys.TransactionKey(outsideKey.Next(), uuid.EmptyUUID)
var count int
if _, err := engine.MVCCIterate(context.Background(), tc.store.Engine(), outsideTxnPrefix, outsideTxnPrefixEnd, hlc.ZeroTimestamp,
true, nil, false, func(roachpb.KeyValue) (bool, error) {
count++
return false, nil
}); err != nil {
t.Fatal(err)
}
if exp := len(testCases); exp != count {
t.Fatalf("expected the %d external transaction entries to remain untouched, "+
"but only %d are left", exp, count)
}
batch := tc.engine.NewSnapshot()
defer batch.Close()
tc.rng.assertState(batch) // check that in-mem and on-disk state were updated
tc.rng.mu.Lock()
txnSpanThreshold := tc.rng.mu.state.TxnSpanGCThreshold
tc.rng.mu.Unlock()
// Verify that the new TxnSpanGCThreshold has reached the Replica.
if expWT := int64(gcTxnAndAC); txnSpanThreshold.WallTime != expWT {
t.Fatalf("expected TxnSpanGCThreshold.Walltime %d, got timestamp %s",
expWT, txnSpanThreshold)
}
}
Example 4: processTransactionTable
// processTransactionTable scans the transaction table and updates txnMap with
// those transactions which are old and either PENDING or with intents
// registered. In the first case we want to push the transaction so that it is
// aborted, and in the second case we may have to resolve the intents success-
// fully before GCing the entry. The transaction records which can be gc'ed are
// returned separately and are not added to txnMap nor intentSpanMap.
func processTransactionTable(
ctx context.Context,
snap engine.Reader,
desc *roachpb.RangeDescriptor,
txnMap map[uuid.UUID]*roachpb.Transaction,
cutoff hlc.Timestamp,
infoMu *lockableGCInfo,
resolveIntents resolveFunc,
) ([]roachpb.GCRequest_GCKey, error) {
infoMu.Lock()
defer infoMu.Unlock()
var gcKeys []roachpb.GCRequest_GCKey
handleOne := func(kv roachpb.KeyValue) error {
var txn roachpb.Transaction
if err := kv.Value.GetProto(&txn); err != nil {
return err
}
infoMu.TransactionSpanTotal++
if !txn.LastActive().Less(cutoff) {
return nil
}
txnID := *txn.ID
// The transaction record should be considered for removal.
switch txn.Status {
case roachpb.PENDING:
// Marked as running, so we need to push it to abort it but won't
// try to GC it in this cycle (for convenience).
// TODO(tschottdorf): refactor so that we can GC PENDING entries
// in the same cycle, but keeping the calls to pushTxn in a central
// location (keeping it easy to batch them up in the future).
infoMu.TransactionSpanGCPending++
txnMap[txnID] = &txn
return nil
case roachpb.ABORTED:
// If we remove this transaction, it effectively still counts as
// ABORTED (by design). So this can be GC'ed even if we can't
// resolve the intents.
// Note: Most aborted transactions weren't aborted by their client,
// but instead by the coordinator - those will not have any intents
// persisted, though they still might exist in the system.
infoMu.TransactionSpanGCAborted++
func() {
infoMu.Unlock() // intentional
defer infoMu.Lock()
if err := resolveIntents(roachpb.AsIntents(txn.Intents, &txn),
true /* wait */, false /* !poison */); err != nil {
log.Warningf(ctx, "failed to resolve intents of aborted txn on gc: %s", err)
}
}()
case roachpb.COMMITTED:
// It's committed, so it doesn't need a push but we can only
// GC it after its intents are resolved.
if err := func() error {
infoMu.Unlock() // intentional
defer infoMu.Lock()
return resolveIntents(roachpb.AsIntents(txn.Intents, &txn), true /* wait */, false /* !poison */)
}(); err != nil {
log.Warningf(ctx, "unable to resolve intents of committed txn on gc: %s", err)
// Returning the error here would abort the whole GC run, and
// we don't want that. Instead, we simply don't GC this entry.
return nil
}
infoMu.TransactionSpanGCCommitted++
default:
panic(fmt.Sprintf("invalid transaction state: %s", txn))
}
gcKeys = append(gcKeys, roachpb.GCRequest_GCKey{Key: kv.Key}) // zero timestamp
return nil
}
startKey := keys.TransactionKey(desc.StartKey.AsRawKey(), uuid.UUID{})
endKey := keys.TransactionKey(desc.EndKey.AsRawKey(), uuid.UUID{})
_, err := engine.MVCCIterate(ctx, snap, startKey, endKey,
hlc.ZeroTimestamp, true /* consistent */, nil, /* txn */
false /* !reverse */, func(kv roachpb.KeyValue) (bool, error) {
return false, handleOne(kv)
})
return gcKeys, err
}
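Example 4 receives its intent-resolution behavior through the resolveFunc parameter; Example 7 below passes a no-op. When exercising this function in isolation, a stub such as the following sketch could be used instead. The (intents, wait, poison) signature is inferred from the calls above and from Example 7; the exact definition of resolveFunc lives elsewhere in the package and is not shown on this page.

// Illustrative stub, not taken from the CockroachDB sources: it records the
// intents it is asked to resolve instead of resolving them.
var recorded []roachpb.Intent

var recordingResolve = func(intents []roachpb.Intent, wait, poison bool) error {
	recorded = append(recorded, intents...)
	return nil
}
// recordingResolve can then be passed as the resolveIntents argument,
// assuming the package's resolveFunc type matches this signature.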
Example 5: TestStoreRangeMergeMetadataCleanup
// TestStoreRangeMergeMetadataCleanup tests that all metadata of a
// subsumed range is cleaned up on merge.
func TestStoreRangeMergeMetadataCleanup(t *testing.T) {
defer leaktest.AfterTest(t)()
storeCfg := storage.TestStoreConfig(nil)
storeCfg.TestingKnobs.DisableSplitQueue = true
store, stopper := createTestStoreWithConfig(t, storeCfg)
defer stopper.Stop()
scan := func(f func(roachpb.KeyValue) (bool, error)) {
if _, err := engine.MVCCIterate(context.Background(), store.Engine(), roachpb.KeyMin, roachpb.KeyMax, hlc.ZeroTimestamp, true, nil, false, f); err != nil {
t.Fatal(err)
}
}
content := roachpb.Key("testing!")
// Write some values left of the proposed split key.
pArgs := putArgs([]byte("aaa"), content)
if _, err := client.SendWrapped(context.Background(), rg1(store), &pArgs); err != nil {
t.Fatal(err)
}
// Collect all the keys.
preKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
preKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Split the range.
_, bDesc, err := createSplitRanges(store)
if err != nil {
t.Fatal(err)
}
// Write some values right of the split key.
pArgs = putArgs([]byte("ccc"), content)
if _, err := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{
RangeID: bDesc.RangeID,
}, &pArgs); err != nil {
t.Fatal(err)
}
// Merge the b range back into the a range.
args := adminMergeArgs(roachpb.KeyMin)
if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
t.Fatal(err)
}
// Collect all the keys again.
postKeys := make(map[string]struct{})
scan(func(kv roachpb.KeyValue) (bool, error) {
postKeys[string(kv.Key)] = struct{}{}
return false, nil
})
// Compute the new keys.
for k := range preKeys {
delete(postKeys, k)
}
// Keep only the subsumed range's local keys.
localRangeKeyPrefix := string(keys.MakeRangeIDPrefix(bDesc.RangeID))
for k := range postKeys {
if !strings.HasPrefix(k, localRangeKeyPrefix) {
delete(postKeys, k)
}
}
if numKeys := len(postKeys); numKeys > 0 {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%d keys were not cleaned up:\n", numKeys)
for k := range postKeys {
fmt.Fprintf(&buf, "%q\n", k)
}
t.Fatal(buf.String())
}
}
Example 6: runDebugCheckStoreCmd
func runDebugCheckStoreCmd(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop()
if len(args) != 1 {
return errors.New("one required argument: dir")
}
db, err := openStore(cmd, args[0], stopper)
if err != nil {
return err
}
// Iterate over the entire range-id-local space.
start := roachpb.Key(keys.LocalRangeIDPrefix)
end := start.PrefixEnd()
replicaInfo := map[roachpb.RangeID]*replicaCheckInfo{}
getReplicaInfo := func(rangeID roachpb.RangeID) *replicaCheckInfo {
if info, ok := replicaInfo[rangeID]; ok {
return info
}
replicaInfo[rangeID] = &replicaCheckInfo{}
return replicaInfo[rangeID]
}
if _, err := engine.MVCCIterate(context.Background(), db, start, end, hlc.MaxTimestamp,
false /* !consistent */, nil, /* txn */
false /* !reverse */, func(kv roachpb.KeyValue) (bool, error) {
rangeID, _, suffix, detail, err := keys.DecodeRangeIDKey(kv.Key)
if err != nil {
return false, err
}
switch {
case bytes.Equal(suffix, keys.LocalRaftTruncatedStateSuffix):
var trunc roachpb.RaftTruncatedState
if err := kv.Value.GetProto(&trunc); err != nil {
return false, err
}
getReplicaInfo(rangeID).truncatedIndex = trunc.Index
case bytes.Equal(suffix, keys.LocalRaftAppliedIndexSuffix):
idx, err := kv.Value.GetInt()
if err != nil {
return false, err
}
getReplicaInfo(rangeID).appliedIndex = uint64(idx)
case bytes.Equal(suffix, keys.LocalRaftLogSuffix):
_, index, err := encoding.DecodeUint64Ascending(detail)
if err != nil {
return false, err
}
ri := getReplicaInfo(rangeID)
if ri.firstIndex == 0 {
ri.firstIndex = index
ri.lastIndex = index
} else {
if index != ri.lastIndex+1 {
fmt.Printf("range %s: log index anomaly: %v followed by %v\n",
rangeID, ri.lastIndex, index)
}
ri.lastIndex = index
}
}
return false, nil
}); err != nil {
return err
}
for rangeID, info := range replicaInfo {
if info.truncatedIndex != info.firstIndex-1 {
fmt.Printf("range %s: truncated index %v should equal first index %v - 1\n",
rangeID, info.truncatedIndex, info.firstIndex)
}
if info.appliedIndex < info.firstIndex || info.appliedIndex > info.lastIndex {
fmt.Printf("range %s: applied index %v should be between first index %v and last index %v\n",
rangeID, info.appliedIndex, info.firstIndex, info.lastIndex)
}
}
return nil
}
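The replicaCheckInfo type used in Example 6 is not part of the excerpt. Based on the fields it reads and writes (truncatedIndex, appliedIndex, firstIndex, lastIndex), a minimal definition consistent with that usage is sketched below; the actual struct in the CockroachDB sources may carry more fields.

// Minimal sketch inferred from the fields used above; not the upstream definition.
type replicaCheckInfo struct {
	truncatedIndex uint64 // index recorded in the range's RaftTruncatedState
	appliedIndex   uint64 // last applied raft index found for the range
	firstIndex     uint64 // first raft log index seen while scanning
	lastIndex      uint64 // last raft log index seen while scanning
}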
Example 7: runDebugGCCmd
func runDebugGCCmd(cmd *cobra.Command, args []string) error {
stopper := stop.NewStopper()
defer stopper.Stop()
// One required argument (the store directory), plus an optional range ID.
if len(args) < 1 || len(args) > 2 {
return errors.New("arguments: dir [range_id]")
}
var rangeID roachpb.RangeID
if len(args) == 2 {
var err error
if rangeID, err = parseRangeID(args[1]); err != nil {
return err
}
}
db, err := openStore(cmd, args[0], stopper)
if err != nil {
return err
}
start := keys.RangeDescriptorKey(roachpb.RKeyMin)
end := keys.RangeDescriptorKey(roachpb.RKeyMax)
var descs []roachpb.RangeDescriptor
if _, err := engine.MVCCIterate(context.Background(), db, start, end, hlc.MaxTimestamp,
false /* !consistent */, nil, /* txn */
false /* !reverse */, func(kv roachpb.KeyValue) (bool, error) {
var desc roachpb.RangeDescriptor
_, suffix, _, err := keys.DecodeRangeKey(kv.Key)
if err != nil {
return false, err
}
if !bytes.Equal(suffix, keys.LocalRangeDescriptorSuffix) {
return false, nil
}
if err := kv.Value.GetProto(&desc); err != nil {
return false, err
}
if desc.RangeID == rangeID || rangeID == 0 {
descs = append(descs, desc)
}
return desc.RangeID == rangeID, nil
}); err != nil {
return err
}
if len(descs) == 0 {
return fmt.Errorf("no range matching the criteria found")
}
for _, desc := range descs {
snap := db.NewSnapshot()
defer snap.Close()
_, info, err := storage.RunGC(context.Background(), &desc, snap, hlc.Timestamp{WallTime: timeutil.Now().UnixNano()},
config.GCPolicy{TTLSeconds: 24 * 60 * 60 /* 1 day */}, func(_ hlc.Timestamp, _ *roachpb.Transaction, _ roachpb.PushTxnType) {
}, func(_ []roachpb.Intent, _, _ bool) error { return nil })
if err != nil {
return err
}
fmt.Printf("RangeID: %d [%s, %s):\n", desc.RangeID, desc.StartKey, desc.EndKey)
_, _ = pretty.Println(info)
}
return nil
}
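Two helpers referenced in Example 7, openStore and parseRangeID, are defined elsewhere in the cli package and are not shown here. For parseRangeID, a plausible minimal version is sketched below, assuming it simply parses a base-10 range ID and rejects non-positive values; it is illustrative only and not the upstream implementation.

// Illustrative sketch (assumed imports: fmt, strconv, roachpb); the real
// helper in the CockroachDB cli package may differ.
func parseRangeID(arg string) (roachpb.RangeID, error) {
	id, err := strconv.ParseInt(arg, 10, 64)
	if err != nil {
		return 0, err
	}
	if id <= 0 {
		return 0, fmt.Errorf("illegal RangeID: %s", arg)
	}
	return roachpb.RangeID(id), nil
}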