This page collects typical usage examples of the Store class from the Golang package github.com/cockroachdb/cockroach/storage. If you are wondering what the Store class is for and how to use it, the curated examples below may help.
It presents 15 code examples of the Store class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
Example 1: getGauge

// getGauge fetches the gauge registered under the given key from the
// store's metric registry, failing the test if no such gauge exists.
func getGauge(t *testing.T, s *storage.Store, key string) int64 {
	gauge := s.Registry().GetGauge(key)
	if gauge == nil {
		t.Fatal(util.ErrorfSkipFrames(1, "store did not contain gauge %s", key))
	}
	return gauge.Value()
}
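As a usage sketch only: a test might combine getGauge with an existing store to assert a metric floor. Both createTestStore and the gauge name "ranges" below are illustrative assumptions, not part of the example above.

// A hedged usage sketch. createTestStore and the gauge name "ranges"
// are hypothetical stand-ins for whatever the surrounding test provides.
func TestRangeGauge(t *testing.T) {
	store, stopper := createTestStore(t) // hypothetical test helper
	defer stopper.Stop()
	if v := getGauge(t, store, "ranges"); v < 1 {
		t.Errorf("expected at least 1 range, got %d", v)
	}
}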
Example 2: verifyRocksDBStats

// verifyRocksDBStats recomputes the store's metrics and checks that a
// representative set of RocksDB gauges is at or above a minimum value.
func verifyRocksDBStats(t *testing.T, s *storage.Store) {
	if err := s.ComputeMetrics(0); err != nil {
		t.Fatal(err)
	}
	m := s.Metrics()
	testcases := []struct {
		gauge *metric.Gauge
		min   int64
	}{
		{m.RdbBlockCacheHits, 10},
		{m.RdbBlockCacheMisses, 0},
		{m.RdbBlockCacheUsage, 0},
		{m.RdbBlockCachePinnedUsage, 0},
		{m.RdbBloomFilterPrefixChecked, 20},
		{m.RdbBloomFilterPrefixUseful, 20},
		{m.RdbMemtableHits, 0},
		{m.RdbMemtableMisses, 0},
		{m.RdbMemtableTotalSize, 5000},
		{m.RdbFlushes, 1},
		{m.RdbCompactions, 0},
		{m.RdbTableReadersMemEstimate, 50},
	}
	for _, tc := range testcases {
		if a := tc.gauge.Value(); a < tc.min {
			t.Errorf("gauge %s = %d < min %d", tc.gauge.GetName(), a, tc.min)
		}
	}
}
Example 3: getCounter

// getCounter fetches the counter registered under the given key from the
// store's metric registry, failing the test if no such counter exists.
func getCounter(t *testing.T, s *storage.Store, key string) int64 {
	counter := s.Registry().GetCounter(key)
	if counter == nil {
		t.Fatal(util.ErrorfSkipFrames(1, "store did not contain counter %s", key))
	}
	return counter.Count()
}
Example 4: ExecuteCmd

// ExecuteCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup of header.Key in the ranges slice.
func (kv *LocalKV) ExecuteCmd(method string, args proto.Request, replyChan interface{}) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, we have to bend over backwards a
	// little: we need to find the Store, but all we have is the Key. So
	// find its Range locally, and pull out its Replica, which we use to
	// find the Store. This lets us use the same codepath below
	// (store.ExecuteCmd) for both locally and remotely originated
	// commands.
	header := args.Header()
	if header.Replica.NodeID == 0 {
		if repl := kv.lookupReplica(header.Key); repl != nil {
			header.Replica = *repl
		} else {
			err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
		}
	}
	if err == nil {
		store, err = kv.GetStore(&header.Replica)
	}
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(proto.Response)
	if err != nil {
		reply.Header().SetGoError(err)
	} else {
		store.ExecuteCmd(method, args, reply)
	}
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}
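The reflect.New(...).Elem().Elem() line is dense. Here is a minimal, self-contained sketch of the same trick, with a made-up SomeResponse type standing in for a proto.Response: given a chan *SomeResponse hidden behind an interface{}, it constructs a fresh *SomeResponse and sends it back through the channel.

package main

import (
	"fmt"
	"reflect"
)

type SomeResponse struct{ Msg string }

func sendReply(replyChan interface{}) {
	// TypeOf(replyChan) is chan *SomeResponse; .Elem() is *SomeResponse;
	// .Elem() again is SomeResponse. reflect.New yields a *SomeResponse.
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(*SomeResponse)
	reply.Msg = "done"
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}

func main() {
	ch := make(chan *SomeResponse, 1)
	sendReply(ch)
	fmt.Println((<-ch).Msg) // prints "done"
}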
Example 5: executeCmd

// executeCmd runs Store.ExecuteCmd in a goroutine. A channel with
// element type equal to the reply type is created and returned
// immediately. The reply is sent to the channel once the cmd has been
// executed by the store. The store is looked up from the store map
// if specified by header.Replica; otherwise, the command is being
// executed locally, and the replica is determined via lookup of
// header.Key in the ranges slice.
func (db *LocalDB) executeCmd(method string, header *storage.RequestHeader, args, reply interface{}) interface{} {
	chanVal := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, reflect.TypeOf(reply)), 1)
	replyVal := reflect.ValueOf(reply)
	go func() {
		// If the replica isn't specified in the header, look it up.
		var err error
		var store *storage.Store
		// If we aren't given a Replica, we have to bend over backwards a
		// little: we need to find the Store, but all we have is the Key.
		// So find its Range locally, and pull out its Replica, which we
		// use to find the Store. This lets us use the same codepath below
		// (store.ExecuteCmd) for both locally and remotely originated
		// commands.
		if header.Replica.NodeID == 0 {
			if repl := db.lookupReplica(header.Key); repl != nil {
				header.Replica = *repl
			} else {
				err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
			}
		}
		if err == nil {
			store, err = db.GetStore(&header.Replica)
		}
		if err != nil {
			reflect.Indirect(replyVal).FieldByName("Error").Set(reflect.ValueOf(err))
		} else {
			store.ExecuteCmd(method, header, args, reply)
		}
		chanVal.Send(replyVal)
	}()
	return chanVal.Interface()
}
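For the channel construction itself, a stripped-down, runnable sketch (with a made-up PutResponse type) shows what reflect.MakeChan and reflect.ChanOf produce, and how a caller can assert the returned interface{} back to a typed channel and receive from it directly.

package main

import (
	"fmt"
	"reflect"
)

type PutResponse struct{ Err error }

// makeReplyChan mirrors the pattern above: build a buffered channel whose
// element type matches the (pointer) reply type, and hand it back untyped.
func makeReplyChan(reply interface{}) interface{} {
	chanVal := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, reflect.TypeOf(reply)), 1)
	chanVal.Send(reflect.ValueOf(reply))
	return chanVal.Interface()
}

func main() {
	ch := makeReplyChan(&PutResponse{}).(chan *PutResponse)
	fmt.Printf("%T\n", <-ch) // prints *main.PutResponse
}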
Example 6: executeCmd

// executeCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup of header.Key in the ranges slice.
func (db *LocalDB) executeCmd(method string, args storage.Request, reply storage.Response) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, we have to bend over backwards a
	// little: we need to find the Store, but all we have is the Key. So
	// find its Range locally, and pull out its Replica, which we use to
	// find the Store. This lets us use the same codepath below
	// (store.ExecuteCmd) for both locally and remotely originated
	// commands.
	header := args.Header()
	if header.Replica.NodeID == 0 {
		if repl := db.lookupReplica(header.Key); repl != nil {
			header.Replica = *repl
		} else {
			err = util.Errorf("unable to lookup range replica for key %q", string(header.Key))
		}
	}
	if err == nil {
		store, err = db.GetStore(&header.Replica)
	}
	if err != nil {
		reply.Header().Error = err
	} else {
		store.ExecuteCmd(method, args, reply)
	}
}
Example 7: fillRange

// fillRange writes keys with the given prefix and associated values
// until at least `bytes` bytes have been written or the given range
// has split.
func fillRange(store *storage.Store, rangeID roachpb.RangeID, prefix roachpb.Key, bytes int64, t *testing.T) {
	src := rand.New(rand.NewSource(0))
	for {
		var ms engine.MVCCStats
		if err := engine.MVCCGetRangeStats(store.Engine(), rangeID, &ms); err != nil {
			t.Fatal(err)
		}
		keyBytes, valBytes := ms.KeyBytes, ms.ValBytes
		if keyBytes+valBytes >= bytes {
			return
		}
		key := append(append([]byte(nil), prefix...), randutil.RandBytes(src, 100)...)
		key = keys.MakeNonColumnKey(key)
		val := randutil.RandBytes(src, int(src.Int31n(1<<8)))
		pArgs := putArgs(key, val)
		_, err := client.SendWrappedWith(store, nil, roachpb.Header{
			RangeID: rangeID,
		}, &pArgs)
		// When the split occurs in the background, our writes may start failing.
		// We know we can stop writing when this happens.
		if _, ok := err.(*roachpb.RangeKeyMismatchError); ok {
			return
		} else if err != nil {
			t.Fatal(err)
		}
	}
}
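A stdlib-only sketch of the key generation above: seeding the source makes the written keys reproducible across runs, and copying the prefix into a fresh slice before appending keeps the caller's prefix from being aliased. randutil.RandBytes is replaced here by (*rand.Rand).Read purely for illustration.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	prefix := []byte("table/1/")
	src := rand.New(rand.NewSource(0)) // fixed seed => reproducible keys
	suffix := make([]byte, 8)
	src.Read(suffix) // stdlib stand-in for randutil.RandBytes(src, 8)
	// Copy the prefix into a fresh slice before appending so the caller's
	// slice is never mutated through shared backing storage.
	key := append(append([]byte(nil), prefix...), suffix...)
	fmt.Printf("%q\n", key)
}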
Example 8: verifyRocksDBStats

// verifyRocksDBStats recomputes the store's metrics and checks that each
// named RocksDB gauge is at or above a minimum value.
func verifyRocksDBStats(t *testing.T, s *storage.Store) {
	if err := s.ComputeMetrics(); err != nil {
		t.Fatal(err)
	}
	testcases := []struct {
		gaugeName string
		min       int64
	}{
		{"rocksdb.block.cache.hits", 10},
		{"rocksdb.block.cache.misses", 0},
		{"rocksdb.block.cache.usage", 0},
		{"rocksdb.block.cache.pinned-usage", 0},
		{"rocksdb.bloom.filter.prefix.checked", 20},
		{"rocksdb.bloom.filter.prefix.useful", 20},
		{"rocksdb.memtable.hits", 0},
		{"rocksdb.memtable.misses", 0},
		{"rocksdb.memtable.total-size", 5000},
		{"rocksdb.flushes", 1},
		{"rocksdb.compactions", 0},
		{"rocksdb.table-readers-mem-estimate", 50},
	}
	for _, tc := range testcases {
		if a := getGauge(t, s, tc.gaugeName); a < tc.min {
			t.Errorf("gauge %s = %d < min %d", tc.gaugeName, a, tc.min)
		}
	}
}
Example 9: ExecuteCmd

// ExecuteCmd synchronously runs Store.ExecuteCmd. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each of the stores.
func (kv *LocalKV) ExecuteCmd(method string, args proto.Request, replyChan interface{}) {
	// If the replica isn't specified in the header, look it up.
	var err error
	var store *storage.Store
	// If we aren't given a Replica, we have to bend over backwards a
	// little: we need to find the Store, but all we have is the Key. So
	// find its Range locally. This lets us use the same codepath below
	// (store.ExecuteCmd) for both locally and remotely originated
	// commands.
	header := args.Header()
	if header.Replica.StoreID == 0 {
		var repl *proto.Replica
		repl, err = kv.lookupReplica(header.Key, header.EndKey)
		if err == nil {
			header.Replica = *repl
		}
	}
	if err == nil {
		store, err = kv.GetStore(header.Replica.StoreID)
	}
	reply := reflect.New(reflect.TypeOf(replyChan).Elem().Elem()).Interface().(proto.Response)
	if err != nil {
		reply.Header().SetGoError(err)
	} else {
		store.ExecuteCmd(method, args, reply)
		if err := reply.Verify(args); err != nil {
			reply.Header().SetGoError(err)
		}
	}
	reflect.ValueOf(replyChan).Send(reflect.ValueOf(reply))
}
Example 10: compareStoreStatus

// compareStoreStatus ensures that the actual store status for the passed-in
// store is updated correctly. It checks that Desc.StoreID, Desc.Attrs,
// Desc.Node, Desc.Capacity.Capacity, NodeID, RangeCount, and
// ReplicatedRangeCount are exactly correct, and that the bytes and counts
// for Live, Key, and Val are at least the expected values.
// The latest actual stats are returned.
func compareStoreStatus(t *testing.T, store *storage.Store, expectedStoreStatus *storage.StoreStatus, testNumber int) *storage.StoreStatus {
	storeStatusKey := keys.StoreStatusKey(int32(store.Ident.StoreID))
	gArgs, gReply := getArgs(storeStatusKey, 1, store.Ident.StoreID)
	if err := store.ExecuteCmd(context.Background(), proto.Call{Args: gArgs, Reply: gReply}); err != nil {
		t.Fatalf("%v: failure getting store status: %s", testNumber, err)
	}
	if gReply.Value == nil {
		t.Errorf("%v: could not find store status at: %s", testNumber, storeStatusKey)
	}
	storeStatus := &storage.StoreStatus{}
	if err := gogoproto.Unmarshal(gReply.Value.GetBytes(), storeStatus); err != nil {
		t.Fatalf("%v: could not unmarshal store status: %+v", testNumber, gReply)
	}
	// Values must match exactly.
	if expectedStoreStatus.Desc.StoreID != storeStatus.Desc.StoreID {
		t.Errorf("%v: actual Desc.StoreID does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if !reflect.DeepEqual(expectedStoreStatus.Desc.Attrs, storeStatus.Desc.Attrs) {
		t.Errorf("%v: actual Desc.Attrs does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if !reflect.DeepEqual(expectedStoreStatus.Desc.Node, storeStatus.Desc.Node) {
		t.Errorf("%v: actual Desc.Node does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Desc.Capacity.Capacity != expectedStoreStatus.Desc.Capacity.Capacity {
		t.Errorf("%v: actual Desc.Capacity.Capacity does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if expectedStoreStatus.NodeID != storeStatus.NodeID {
		t.Errorf("%v: actual node ID does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if expectedStoreStatus.RangeCount != storeStatus.RangeCount {
		t.Errorf("%v: actual RangeCount does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if expectedStoreStatus.ReplicatedRangeCount != storeStatus.ReplicatedRangeCount {
		t.Errorf("%v: actual ReplicatedRangeCount does not match expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	// Values should be >= the expected values.
	if storeStatus.Stats.LiveBytes < expectedStoreStatus.Stats.LiveBytes {
		t.Errorf("%v: actual Live Bytes is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.KeyBytes < expectedStoreStatus.Stats.KeyBytes {
		t.Errorf("%v: actual Key Bytes is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.ValBytes < expectedStoreStatus.Stats.ValBytes {
		t.Errorf("%v: actual Val Bytes is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.LiveCount < expectedStoreStatus.Stats.LiveCount {
		t.Errorf("%v: actual Live Count is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.KeyCount < expectedStoreStatus.Stats.KeyCount {
		t.Errorf("%v: actual Key Count is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	if storeStatus.Stats.ValCount < expectedStoreStatus.Stats.ValCount {
		t.Errorf("%v: actual Val Count is not greater or equal to expected\nexpected: %+v\nactual: %v\n", testNumber, expectedStoreStatus, storeStatus)
	}
	return storeStatus
}
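The long run of near-identical checks invites a table-driven refactor. A toy, self-contained sketch of that pattern follows; the status type here is made up (the real one would be storage.StoreStatus), and the refactor is only a suggestion.

package main

import (
	"fmt"
	"reflect"
)

// status is a made-up stand-in with two of the fields checked above.
type status struct {
	NodeID     int
	RangeCount int
}

func main() {
	expected := status{NodeID: 1, RangeCount: 5}
	actual := status{NodeID: 1, RangeCount: 4}
	// One table row per exact-match field replaces one if-block each.
	checks := []struct {
		name             string
		expected, actual interface{}
	}{
		{"NodeID", expected.NodeID, actual.NodeID},
		{"RangeCount", expected.RangeCount, actual.RangeCount},
	}
	for _, c := range checks {
		if !reflect.DeepEqual(c.expected, c.actual) {
			fmt.Printf("%s mismatch: expected %v, got %v\n", c.name, c.expected, c.actual)
		}
	}
}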
Example 11: SendBatch

// SendBatch implements batch.Sender.
func (ls *LocalSender) SendBatch(ctx context.Context, ba proto.BatchRequest) (*proto.BatchResponse, error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error
	// If we aren't given a Replica, we have to bend over backwards a
	// little. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *proto.Replica
		var rangeID proto.RangeID
		rangeID, repl, err = ls.lookupReplica(ba.Key, ba.EndKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, ba.Method(), // TODO(tschottdorf): Method() always `Batch`.
		log.Key, ba.Key,
		log.RangeID, ba.RangeID)
	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	var br *proto.BatchResponse
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			ba.Txn.MaxTimestamp = ba.Txn.Timestamp
		}
		{
			var tmpR proto.Response
			// TODO(tschottdorf): &ba -> ba
			tmpR, err = store.ExecuteCmd(ctx, &ba)
			// TODO(tschottdorf): remove this dance once BatchResponse is returned.
			if tmpR != nil {
				br = tmpR.(*proto.BatchResponse)
				if br.Error != nil {
					panic(proto.ErrorUnexpectedlySet)
				}
			}
		}
	}
	// TODO(tschottdorf): Later error needs to be associated to an index
	// and ideally individual requests don't even have an error in their
	// header. See #1891.
	return br, err
}
Example 12: AddStore

// AddStore adds the specified store to the store map.
func (db *LocalDB) AddStore(s *storage.Store) {
	db.mu.Lock()
	defer db.mu.Unlock()
	if _, ok := db.storeMap[s.Ident.StoreID]; ok {
		panic(fmt.Sprintf("cannot add store twice to local db: %+v", s.Ident))
	}
	db.storeMap[s.Ident.StoreID] = s
	// Maintain a slice of ranges ordered by StartKey.
	db.ranges = append(db.ranges, s.GetRanges()...)
	sort.Sort(db.ranges)
}
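sort.Sort(db.ranges) only compiles if the ranges slice implements sort.Interface. A self-contained sketch of what such an implementation might look like, with a made-up Rng element type ordered by StartKey (the real element type and field layout are assumptions here):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// Rng is a hypothetical stand-in for the store's range type; only the
// StartKey matters for ordering.
type Rng struct{ StartKey []byte }

// RangeSlice implements sort.Interface ordered by StartKey, which is what
// sort.Sort(db.ranges) in the example requires of the ranges slice.
type RangeSlice []*Rng

func (rs RangeSlice) Len() int      { return len(rs) }
func (rs RangeSlice) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] }
func (rs RangeSlice) Less(i, j int) bool {
	return bytes.Compare(rs[i].StartKey, rs[j].StartKey) < 0
}

func main() {
	ranges := RangeSlice{{StartKey: []byte("m")}, {StartKey: []byte("a")}}
	sort.Sort(ranges)
	fmt.Printf("%q\n", ranges[0].StartKey) // "a"
}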
Example 13: Send

// Send implements the client.Sender interface. The store is looked
// up from the store map if specified by header.Replica; otherwise,
// the command is being executed locally, and the replica is
// determined via lookup through each store's LookupRange method.
func (ls *LocalSender) Send(ctx context.Context, call proto.Call) {
	var err error
	var store *storage.Store
	trace := tracer.FromCtx(ctx)
	// If we aren't given a Replica, we have to bend over backwards a
	// little. This case applies exclusively to unittests.
	header := call.Args.Header()
	if header.RaftID == 0 || header.Replica.StoreID == 0 {
		var repl *proto.Replica
		var raftID proto.RaftID
		raftID, repl, err = ls.lookupReplica(header.Key, header.EndKey)
		if err == nil {
			header.RaftID = raftID
			header.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.Method, call.Method(),
		log.Key, header.Key,
		log.RaftID, header.RaftID)
	if err == nil {
		store, err = ls.GetStore(header.Replica.StoreID)
	}
	var reply proto.Response
	if err == nil {
		// For calls that read data within a txn, we can avoid uncertainty
		// related retries in certain situations. If the node is in
		// "CertainNodes", we need not worry about uncertain reads any
		// more. Setting MaxTimestamp=Timestamp for the operation
		// accomplishes that. See proto.Transaction.CertainNodes for details.
		if header.Txn != nil && header.Txn.CertainNodes.Contains(header.Replica.NodeID) {
			// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
			trace.Event("read has no clock uncertainty")
			header.Txn.MaxTimestamp = header.Txn.Timestamp
		}
		reply, err = store.ExecuteCmd(ctx, call.Args)
	}
	if reply != nil {
		gogoproto.Merge(call.Reply, reply)
	}
	if call.Reply.Header().Error != nil {
		panic(proto.ErrorUnexpectedlySet)
	}
	if err != nil {
		call.Reply.Header().SetGoError(err)
	}
}
Example 14: createSplitRanges

// createSplitRanges splits the first range at key "b" and returns the
// descriptors of the resulting left- and right-hand ranges.
func createSplitRanges(store *storage.Store) (*roachpb.RangeDescriptor, *roachpb.RangeDescriptor, error) {
	args := adminSplitArgs(roachpb.KeyMin, []byte("b"))
	if _, err := client.SendWrapped(rg1(store), nil, &args); err != nil {
		return nil, nil, err
	}
	rangeADesc := store.LookupReplica([]byte("a"), nil).Desc()
	rangeBDesc := store.LookupReplica([]byte("c"), nil).Desc()
	if bytes.Equal(rangeADesc.StartKey, rangeBDesc.StartKey) {
		log.Errorf("split ranges keys are equal %q == %q", rangeADesc.StartKey, rangeBDesc.StartKey)
	}
	return rangeADesc, rangeBDesc, nil
}
Example 15: Send

// Send implements the client.Sender interface. The store is looked up from the
// store map if specified by the request; otherwise, the command is being
// executed locally, and the replica is determined via lookup through each
// store's LookupRange method. The latter path is taken only by unit tests.
func (ls *LocalSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	trace := tracer.FromCtx(ctx)
	var store *storage.Store
	var err error
	// If we aren't given a Replica, we have to bend over backwards a
	// little. This case applies exclusively to unittests.
	if ba.RangeID == 0 || ba.Replica.StoreID == 0 {
		var repl *roachpb.ReplicaDescriptor
		var rangeID roachpb.RangeID
		key, endKey := keys.Range(ba)
		rangeID, repl, err = ls.lookupReplica(key, endKey)
		if err == nil {
			ba.RangeID = rangeID
			ba.Replica = *repl
		}
	}
	ctx = log.Add(ctx,
		log.RangeID, ba.RangeID)
	if err == nil {
		store, err = ls.GetStore(ba.Replica.StoreID)
	}
	var br *roachpb.BatchResponse
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	// For calls that read data within a txn, we can avoid uncertainty
	// related retries in certain situations. If the node is in
	// "CertainNodes", we need not worry about uncertain reads any
	// more. Setting MaxTimestamp=Timestamp for the operation
	// accomplishes that. See roachpb.Transaction.CertainNodes for details.
	if ba.Txn != nil && ba.Txn.CertainNodes.Contains(ba.Replica.NodeID) {
		// MaxTimestamp = Timestamp corresponds to no clock uncertainty.
		trace.Event("read has no clock uncertainty")
		ba.Txn.MaxTimestamp = ba.Txn.Timestamp
	}
	br, pErr := store.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(store, br))
	}
	return br, pErr
}