This article collects typical usage examples of the MVCCGetProto function from the Go package github.com/cockroachdb/cockroach/storage/engine. If you have been wondering what exactly MVCCGetProto does, how to call it, or what real uses of it look like, the curated examples here should help.
The following shows 15 code examples of the MVCCGetProto function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
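As context for the examples, the call pattern they all share looks roughly like the sketch below. This is a minimal sketch, assuming the pre-context signature used in most of the snippets (later revisions, as in Examples 12 and 13, prepend a context.Context and read from an engine.Reader); the helper readRangeDescriptor and its use of RKeyMin are illustrative only, not part of the CockroachDB source.
// Hedged sketch of the common MVCCGetProto call pattern. The import
// paths match the (older) CockroachDB tree the examples are drawn from;
// readRangeDescriptor is a hypothetical helper.
import (
	"github.com/cockroachdb/cockroach/keys"
	"github.com/cockroachdb/cockroach/roachpb"
	"github.com/cockroachdb/cockroach/storage/engine"
)

func readRangeDescriptor(eng engine.Engine, now roachpb.Timestamp) (*roachpb.RangeDescriptor, error) {
	var desc roachpb.RangeDescriptor
	key := keys.RangeDescriptorKey(roachpb.RKeyMin)
	// consistent=true returns an error on unresolved write intents;
	// txn=nil performs a non-transactional read. The bool reports
	// whether any value was found at the key.
	ok, err := engine.MVCCGetProto(eng, key, now, true, nil, &desc)
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, nil // no descriptor stored at this key
	}
	return &desc, nil
}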
Example 1: TestRemoveRangeWithoutGC
// TestRemoveRangeWithoutGC ensures that we do not panic when a
// replica has been removed but not yet GC'd (and therefore
// does not have an active raft group).
func TestRemoveRangeWithoutGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Disable the GC queue and move the range from store 0 to 1.
	mtc.stores[0].DisableReplicaGCQueue(true)
	const rangeID roachpb.RangeID = 1
	mtc.replicateRange(rangeID, 1)
	mtc.unreplicateRange(rangeID, 0)
	// Wait for store 0 to process the removal.
	util.SucceedsWithin(t, time.Second, func() error {
		rep, err := mtc.stores[0].GetReplica(rangeID)
		if err != nil {
			return err
		}
		desc := rep.Desc()
		if len(desc.Replicas) != 1 {
			return util.Errorf("range has %d replicas", len(desc.Replicas))
		}
		return nil
	})
	// The replica's data is still on disk even though the Replica
	// object is removed.
	var desc roachpb.RangeDescriptor
	descKey := keys.RangeDescriptorKey(roachpb.RKeyMin)
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if !ok {
		t.Fatal("expected range descriptor to be present")
	}
	// Stop and restart the store to reset the replica's raftGroup
	// pointer to nil. As long as the store has not been restarted it
	// can continue to use its last known replica ID.
	mtc.stopStore(0)
	mtc.restartStore(0)
	// Turn off the GC queue to ensure that the replica is deleted at
	// startup instead of by the scanner. This is not 100% guaranteed
	// since the scanner could have already run at this point, but it
	// should be enough to prevent us from accidentally relying on the
	// scanner.
	mtc.stores[0].DisableReplicaGCQueue(true)
	// The Replica object is not recreated.
	if _, err := mtc.stores[0].GetReplica(rangeID); err == nil {
		t.Fatalf("expected replica to be missing")
	}
	// And the data is no longer on disk.
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), descKey,
		mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil {
		t.Fatal(err)
	} else if ok {
		t.Fatal("expected range descriptor to be absent")
	}
}
Example 2: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := multiTestContext{}
	mtc.Start(t, 2)
	defer mtc.Stop()
	// Issue a command on the first node before replicating.
	incArgs, incResp := incrementArgs([]byte("a"), 5, 1, mtc.stores[0].StoreID())
	if err := mtc.stores[0].ExecuteCmd(context.Background(), proto.Call{Args: incArgs, Reply: incResp}); err != nil {
		t.Fatal(err)
	}
	rng, err := mtc.stores[0].GetRange(1)
	if err != nil {
		t.Fatal(err)
	}
	if err := rng.ChangeReplicas(proto.ADD_REPLICA,
		proto.Replica{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := proto.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.RangeMetaKey(proto.KeyMax)
		meta1 := keys.RangeMetaKey(meta2)
		for _, key := range []proto.Key{meta2, meta1} {
			metaDesc := proto.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key)
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})
	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		getArgs, getResp := getArgs([]byte("a"), 1, mtc.stores[1].StoreID())
		getArgs.ReadConsistency = proto.INCONSISTENT
		if err := mtc.stores[1].ExecuteCmd(context.Background(), proto.Call{Args: getArgs, Reply: getResp}); err != nil {
			return util.Errorf("failed to read data")
		}
		if v := mustGetInteger(getResp.Value); v != 5 {
			return util.Errorf("failed to read correct data: %d", v)
		}
		return nil
	})
}
Example 3: TestReplicateRange
// TestReplicateRange verifies basic replication functionality by creating two stores
// and a range, replicating the range to the second store, and reading its data there.
func TestReplicateRange(t *testing.T) {
	defer leaktest.AfterTest(t)
	mtc := startMultiTestContext(t, 2)
	defer mtc.Stop()
	// Issue a command on the first node before replicating.
	incArgs := incrementArgs([]byte("a"), 5)
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &incArgs); err != nil {
		t.Fatal(err)
	}
	rng, err := mtc.stores[0].GetReplica(1)
	if err != nil {
		t.Fatal(err)
	}
	if err := rng.ChangeReplicas(roachpb.ADD_REPLICA,
		roachpb.ReplicaDescriptor{
			NodeID:  mtc.stores[1].Ident.NodeID,
			StoreID: mtc.stores[1].Ident.StoreID,
		}, rng.Desc()); err != nil {
		t.Fatal(err)
	}
	// Verify no intent remains on range descriptor key.
	key := keys.RangeDescriptorKey(rng.Desc().StartKey)
	desc := roachpb.RangeDescriptor{}
	if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); !ok || err != nil {
		t.Fatalf("fetching range descriptor yielded %t, %s", ok, err)
	}
	// Verify that in time, no intents remain on meta addressing
	// keys, and that range descriptor on the meta records is correct.
	util.SucceedsWithin(t, 1*time.Second, func() error {
		meta2 := keys.Addr(keys.RangeMetaKey(roachpb.RKeyMax))
		meta1 := keys.Addr(keys.RangeMetaKey(meta2))
		for _, key := range []roachpb.RKey{meta2, meta1} {
			metaDesc := roachpb.RangeDescriptor{}
			if ok, err := engine.MVCCGetProto(mtc.stores[0].Engine(), key.AsRawKey(), mtc.stores[0].Clock().Now(), true, nil, &metaDesc); !ok || err != nil {
				return util.Errorf("failed to resolve %s", key.AsRawKey())
			}
			if !reflect.DeepEqual(metaDesc, desc) {
				return util.Errorf("descs not equal: %+v != %+v", metaDesc, desc)
			}
		}
		return nil
	})
	// Verify that the same data is available on the replica.
	util.SucceedsWithin(t, replicaReadTimeout, func() error {
		getArgs := getArgs([]byte("a"))
		if reply, err := client.SendWrappedWith(rg1(mtc.stores[1]), nil, roachpb.Header{
			ReadConsistency: roachpb.INCONSISTENT,
		}, &getArgs); err != nil {
			return util.Errorf("failed to read data: %s", err)
		} else if e, v := int64(5), mustGetInt(reply.(*roachpb.GetResponse).Value); v != e {
			return util.Errorf("failed to read correct data: expected %d, got %d", e, v)
		}
		return nil
	})
}
Example 4: InternalHeartbeatTxn
// InternalHeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from the
// coordinator. Returns the updated transaction.
func (r *Range) InternalHeartbeatTxn(batch engine.Engine, ms *engine.MVCCStats, args proto.InternalHeartbeatTxnRequest) (proto.InternalHeartbeatTxnResponse, error) {
	var reply proto.InternalHeartbeatTxnResponse
	key := keys.TransactionKey(args.Txn.Key, args.Txn.ID)
	var txn proto.Transaction
	if ok, err := engine.MVCCGetProto(batch, key, proto.ZeroTimestamp, true, nil, &txn); err != nil {
		return reply, err
	} else if !ok {
		// If no existing transaction record was found, initialize to a
		// shallow copy of the transaction in the request header. We copy
		// to avoid mutating the original below.
		txn = *args.Txn
	}
	if txn.Status == proto.PENDING {
		if txn.LastHeartbeat == nil {
			txn.LastHeartbeat = &proto.Timestamp{}
		}
		if txn.LastHeartbeat.Less(args.Header().Timestamp) {
			*txn.LastHeartbeat = args.Header().Timestamp
		}
		if err := engine.MVCCPutProto(batch, ms, key, proto.ZeroTimestamp, nil, &txn); err != nil {
			return reply, err
		}
	}
	reply.Txn = &txn
	return reply, nil
}
Example 5: InternalHeartbeatTxn
// InternalHeartbeatTxn updates the transaction status and heartbeat
// timestamp after receiving transaction heartbeat messages from the
// coordinator. Returns the updated transaction.
func (r *Range) InternalHeartbeatTxn(batch engine.Engine, ms *engine.MVCCStats,
	args *proto.InternalHeartbeatTxnRequest, reply *proto.InternalHeartbeatTxnResponse) {
	key := keys.TransactionKey(args.Txn.Key, args.Txn.ID)
	var txn proto.Transaction
	ok, err := engine.MVCCGetProto(batch, key, proto.ZeroTimestamp, true, nil, &txn)
	if err != nil {
		reply.SetGoError(err)
		return
	}
	// If no existing transaction record was found, initialize
	// to the transaction in the request header.
	if !ok {
		gogoproto.Merge(&txn, args.Txn)
	}
	if txn.Status == proto.PENDING {
		if txn.LastHeartbeat == nil {
			txn.LastHeartbeat = &proto.Timestamp{}
		}
		if txn.LastHeartbeat.Less(args.Header().Timestamp) {
			*txn.LastHeartbeat = args.Header().Timestamp
		}
		if err := engine.MVCCPutProto(batch, ms, key, proto.ZeroTimestamp, nil, &txn); err != nil {
			reply.SetGoError(err)
			return
		}
	}
	reply.Txn = &txn
}
Example 6: InitialState
// InitialState implements the raft.Storage interface.
func (r *Range) InitialState() (raftpb.HardState, raftpb.ConfState, error) {
	var hs raftpb.HardState
	found, err := engine.MVCCGetProto(r.rm.Engine(), keys.RaftHardStateKey(r.Desc().RaftID),
		proto.ZeroTimestamp, true, nil, &hs)
	if err != nil {
		return raftpb.HardState{}, raftpb.ConfState{}, err
	}
	if !found {
		// We don't have a saved HardState, so set up the defaults.
		if r.isInitialized() {
			// Set the initial log term.
			hs.Term = raftInitialLogTerm
			hs.Commit = raftInitialLogIndex
			atomic.StoreUint64(&r.lastIndex, raftInitialLogIndex)
		} else {
			// This is a new range we are receiving from another node. Start
			// from zero so we will receive a snapshot.
			atomic.StoreUint64(&r.lastIndex, 0)
		}
	}
	var cs raftpb.ConfState
	// For uninitialized ranges, membership is unknown at this point.
	if found || r.isInitialized() {
		for _, rep := range r.Desc().Replicas {
			cs.Nodes = append(cs.Nodes, uint64(proto.MakeRaftNodeID(rep.NodeID, rep.StoreID)))
		}
	}
	return hs, cs, nil
}
Example 7: loadLeaderLease
func loadLeaderLease(eng engine.Engine, raftID proto.RaftID) (*proto.Lease, error) {
	lease := &proto.Lease{}
	if _, err := engine.MVCCGetProto(eng, keys.RaftLeaderLeaseKey(raftID), proto.ZeroTimestamp, true, nil, lease); err != nil {
		return nil, err
	}
	return lease, nil
}
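Note that loadLeaderLease ignores the found flag returned by MVCCGetProto, so a missing lease record comes back as a zero-valued *proto.Lease rather than an error; callers presumably treat such an empty lease as "no lease held".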
Example 8: readBootstrapInfoLocked
func (ls *Stores) readBootstrapInfoLocked(bi *gossip.BootstrapInfo) error {
	latestTS := roachpb.ZeroTimestamp
	timestamps := map[roachpb.StoreID]roachpb.Timestamp{}
	// Find the most recent bootstrap info, collecting timestamps for
	// each store along the way.
	for id, s := range ls.storeMap {
		var storeBI gossip.BootstrapInfo
		ok, err := engine.MVCCGetProto(s.engine, keys.StoreGossipKey(), roachpb.ZeroTimestamp, true, nil, &storeBI)
		if err != nil {
			return err
		}
		timestamps[id] = storeBI.Timestamp
		if ok && latestTS.Less(storeBI.Timestamp) {
			latestTS = storeBI.Timestamp
			*bi = storeBI
		}
	}
	// Update all stores with an earlier timestamp.
	for id, s := range ls.storeMap {
		if timestamps[id].Less(latestTS) {
			if err := engine.MVCCPutProto(s.engine, nil, keys.StoreGossipKey(), roachpb.ZeroTimestamp, nil, bi); err != nil {
				return err
			}
			log.Infof("updated gossip bootstrap info to %s", s)
		}
	}
	ls.biLatestTS = latestTS
	return nil
}
Example 9: raftTruncatedStateLocked
// raftTruncatedStateLocked returns metadata about the log that preceded the
// first current entry. This includes both entries that have been compacted away
// and the dummy entries that make up the starting point of an empty log.
// raftTruncatedStateLocked requires that the replica lock be held.
func (r *Replica) raftTruncatedStateLocked() (roachpb.RaftTruncatedState, error) {
	if r.mu.truncatedState != nil {
		return *r.mu.truncatedState, nil
	}
	ts := roachpb.RaftTruncatedState{}
	ok, err := engine.MVCCGetProto(r.store.Engine(), keys.RaftTruncatedStateKey(r.RangeID),
		roachpb.ZeroTimestamp, true, nil, &ts)
	if err != nil {
		return ts, err
	}
	if !ok {
		if r.isInitializedLocked() {
			// If we created this range, set the initial log index/term.
			ts.Index = raftInitialLogIndex
			ts.Term = raftInitialLogTerm
		} else {
			// This is a new range we are receiving from another node. Start
			// from zero so we will receive a snapshot.
			ts.Index = 0
			ts.Term = 0
		}
	}
	if ts.Index != 0 {
		r.mu.truncatedState = &ts
	}
	return ts, nil
}
Example 10: raftTruncatedState
// raftTruncatedState returns metadata about the log that preceded the first
// current entry. This includes both entries that have been compacted away
// and the dummy entries that make up the starting point of an empty log.
func (r *Replica) raftTruncatedState() (proto.RaftTruncatedState, error) {
	if ts := r.getCachedTruncatedState(); ts != nil {
		return *ts, nil
	}
	ts := proto.RaftTruncatedState{}
	ok, err := engine.MVCCGetProto(r.rm.Engine(), keys.RaftTruncatedStateKey(r.Desc().RangeID),
		proto.ZeroTimestamp, true, nil, &ts)
	if err != nil {
		return ts, err
	}
	if !ok {
		if r.isInitialized() {
			// If we created this range, set the initial log index/term.
			ts.Index = raftInitialLogIndex
			ts.Term = raftInitialLogTerm
		} else {
			// This is a new range we are receiving from another node. Start
			// from zero so we will receive a snapshot.
			ts.Index = 0
			ts.Term = 0
		}
	}
	if ts.Index != 0 {
		r.setCachedTruncatedState(&ts)
	}
	return ts, nil
}
Example 11: Snapshot
// Snapshot implements the raft.Storage interface.
func (r *Replica) Snapshot() (raftpb.Snapshot, error) {
	// Copy all the data from a consistent RocksDB snapshot into a RaftSnapshotData.
	snap := r.rm.NewSnapshot()
	defer snap.Close()
	var snapData proto.RaftSnapshotData
	// Read the range metadata from the snapshot instead of the members
	// of the Range struct because they might be changed concurrently.
	appliedIndex, err := r.loadAppliedIndex(snap)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	var desc proto.RangeDescriptor
	// We ignore intents on the range descriptor (consistent=false) because we
	// know they cannot be committed yet; operations that modify range
	// descriptors resolve their own intents when they commit.
	ok, err := engine.MVCCGetProto(snap, keys.RangeDescriptorKey(r.Desc().StartKey),
		r.rm.Clock().Now(), false /* !consistent */, nil, &desc)
	if err != nil {
		return raftpb.Snapshot{}, util.Errorf("failed to get desc: %s", err)
	}
	if !ok {
		return raftpb.Snapshot{}, util.Errorf("couldn't find range descriptor")
	}
	// Store the RangeDescriptor as metadata; it will be retrieved by ApplySnapshot().
	snapData.RangeDescriptor = desc
	// Iterate over all the data in the range, including local-only data like
	// the response cache.
	for iter := newRangeDataIterator(r.Desc(), snap); iter.Valid(); iter.Next() {
		snapData.KV = append(snapData.KV,
			&proto.RaftSnapshotData_KeyValue{Key: iter.Key(), Value: iter.Value()})
	}
	data, err := gogoproto.Marshal(&snapData)
	if err != nil {
		return raftpb.Snapshot{}, err
	}
	// Synthesize our raftpb.ConfState from desc.
	var cs raftpb.ConfState
	for _, rep := range desc.Replicas {
		cs.Nodes = append(cs.Nodes, uint64(proto.MakeRaftNodeID(rep.NodeID, rep.StoreID)))
	}
	term, err := r.Term(appliedIndex)
	if err != nil {
		return raftpb.Snapshot{}, util.Errorf("failed to fetch term of %d: %s", appliedIndex, err)
	}
	return raftpb.Snapshot{
		Data: data,
		Metadata: raftpb.SnapshotMetadata{
			Index:     appliedIndex,
			Term:      term,
			ConfState: cs,
		},
	}, nil
}
Example 12: raftTruncatedState
func raftTruncatedState(
	eng engine.Reader, rangeID roachpb.RangeID,
) (roachpb.RaftTruncatedState, error) {
	ts := roachpb.RaftTruncatedState{}
	_, err := engine.MVCCGetProto(context.Background(), eng, keys.RaftTruncatedStateKey(rangeID),
		hlc.ZeroTimestamp, true, nil, &ts)
	return ts /* zero if not found */, err
}
Example 13: loadGCThreshold
func loadGCThreshold(
	ctx context.Context, reader engine.Reader, rangeID roachpb.RangeID,
) (hlc.Timestamp, error) {
	var t hlc.Timestamp
	_, err := engine.MVCCGetProto(ctx, reader, keys.RangeLastGCKey(rangeID),
		hlc.ZeroTimestamp, true, nil, &t)
	return t, err
}
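Note that Examples 12 and 13 reflect a later revision of this API than the other snippets: there MVCCGetProto takes a context.Context as its first argument and reads from an engine.Reader rather than an engine.Engine, and the zero timestamp constant comes from the hlc package (hlc.ZeroTimestamp) instead of the proto/roachpb packages.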
Example 14: GetGCMetadata
// GetGCMetadata reads the latest GC metadata for this range.
func (r *Range) GetGCMetadata() (*proto.GCMetadata, error) {
	key := keys.RangeGCMetadataKey(r.Desc().RaftID)
	gcMeta := &proto.GCMetadata{}
	_, err := engine.MVCCGetProto(r.rm.Engine(), key, proto.ZeroTimestamp, true, nil, gcMeta)
	if err != nil {
		return nil, err
	}
	return gcMeta, nil
}
Example 15: GetLastVerificationTimestamp
// GetLastVerificationTimestamp reads the timestamp at which the range's
// data was last verified.
func (r *Range) GetLastVerificationTimestamp() (proto.Timestamp, error) {
	key := keys.RangeLastVerificationTimestampKey(r.Desc().RaftID)
	timestamp := proto.Timestamp{}
	_, err := engine.MVCCGetProto(r.rm.Engine(), key, proto.ZeroTimestamp, true, nil, &timestamp)
	if err != nil {
		return proto.ZeroTimestamp, err
	}
	return timestamp, nil
}