This page collects typical usage examples of the Golang function keys.Addr from github.com/cockroachdb/cockroach/pkg/keys. If you are wondering what the Addr function does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Thirteen code examples of the Addr function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
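Before the examples, here is a minimal sketch of the calling pattern they all share: keys.Addr resolves a roachpb.Key to its range address, a roachpb.RKey, returning an error when the key cannot be addressed, so callers always check the error before comparing the result against range boundaries. The import paths match the examples below; the helper function and variable names here are illustrative only, not part of the CockroachDB API.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

// addrOrMin is a hypothetical helper showing the common pattern:
// resolve a key to its range address and handle the error.
func addrOrMin(k roachpb.Key) roachpb.RKey {
	rk, err := keys.Addr(k)
	if err != nil {
		// In the examples below the error is returned, wrapped, or
		// treated as fatal; here we simply fall back to RKeyMin.
		return roachpb.RKeyMin
	}
	return rk
}

func main() {
	// The resulting RKey can be compared against range descriptor
	// boundaries, e.g. with Less or a descriptor's ContainsKey.
	fmt.Println(addrOrMin(roachpb.Key("a")))
}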
Example 1: prev
// prev gives the right boundary of the union of all requests which don't
// affect keys larger than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'roachpb'.
func prev(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMin
	for _, union := range ba.Requests {
		inner := union.GetInner()
		if _, ok := inner.(*roachpb.NoopRequest); ok {
			continue
		}
		h := inner.Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		eAddr, err := keys.AddrUpperBound(h.EndKey)
		if err != nil {
			return nil, err
		}
		if len(eAddr) == 0 {
			eAddr = addr.Next()
		}
		if !eAddr.Less(k) {
			if !k.Less(addr) {
				// Range contains k, so won't be able to go lower.
				return k, nil
			}
			// Range is disjoint from [KeyMin,k).
			continue
		}
		// We want the largest surviving candidate.
		if candidate.Less(addr) {
			candidate = addr
		}
	}
	return candidate, nil
}
Example 2: next
// next gives the left boundary of the union of all requests which don't
// affect keys less than the given key.
// TODO(tschottdorf): again, better on BatchRequest itself, but can't pull
// 'keys' into 'proto'.
func next(ba roachpb.BatchRequest, k roachpb.RKey) (roachpb.RKey, error) {
	candidate := roachpb.RKeyMax
	for _, union := range ba.Requests {
		inner := union.GetInner()
		if _, ok := inner.(*roachpb.NoopRequest); ok {
			continue
		}
		h := inner.Header()
		addr, err := keys.Addr(h.Key)
		if err != nil {
			return nil, err
		}
		if addr.Less(k) {
			eAddr, err := keys.AddrUpperBound(h.EndKey)
			if err != nil {
				return nil, err
			}
			if k.Less(eAddr) {
				// Starts below k, but continues beyond. Need to stay at k.
				return k, nil
			}
			// Affects only [KeyMin,k).
			continue
		}
		// We want the smallest of the surviving candidates.
		if addr.Less(candidate) {
			candidate = addr
		}
	}
	return candidate, nil
}
Example 3: metaKey
func metaKey(key roachpb.RKey) []byte {
	rk, err := keys.Addr(keys.RangeMetaKey(key))
	if err != nil {
		panic(err)
	}
	return rk
}
Example 4: TestKeyAddress
func TestKeyAddress(t *testing.T) {
	defer leaktest.AfterTest(t)()
	testCases := []struct {
		key roachpb.Key
	}{
		{MakeNameMetadataKey(0, "BAR")},
		{MakeNameMetadataKey(1, "BAR")},
		{MakeNameMetadataKey(1, "foo")},
		{MakeNameMetadataKey(2, "foo")},
		{MakeDescMetadataKey(123)},
		{MakeDescMetadataKey(124)},
	}
	var lastKey roachpb.Key
	for i, test := range testCases {
		resultAddr, err := keys.Addr(test.key)
		if err != nil {
			t.Fatal(err)
		}
		result := resultAddr.AsRawKey()
		if result.Compare(lastKey) <= 0 {
			t.Errorf("%d: key address %q is <= %q", i, result, lastKey)
		}
		lastKey = result
	}
}
Example 5: Seek
// Seek positions the iterator on the start of a span (span.Key or span.EndKey,
// depending on ScanDir). Note that span.EndKey is exclusive, regardless of
// scanDir.
// After calling this, ReplicaInfo() will return information about the range
// containing the start key of the span (or the end key, if the direction is
// Descending).
// NeedAnother() will return true until the iterator is positioned on or after
// the end of the span.
// Possible errors encountered should be checked for with Valid().
//
// Seek can be called repeatedly on the same iterator. To make optimal use of
// caches, Seek()s should be performed on spans sorted according to the
// scanDir (if Descending, then the span with the highest keys should be
// Seek()ed first).
//
// scanDir changes the direction in which Next() will advance the iterator.
func (it *SpanResolverIterator) Seek(
	ctx context.Context, span roachpb.Span, scanDir kv.ScanDirection,
) {
	var key, endKey roachpb.RKey
	var err error
	if key, err = keys.Addr(span.Key); err != nil {
		it.err = err
		return
	}
	if endKey, err = keys.Addr(span.EndKey); err != nil {
		it.err = err
		return
	}
	oldSpan := it.curSpan
	oldDir := it.dir
	it.curSpan = roachpb.RSpan{
		Key:    key,
		EndKey: endKey,
	}
	it.dir = scanDir
	var seekKey roachpb.RKey
	if scanDir == kv.Ascending {
		seekKey = it.curSpan.Key
	} else {
		seekKey = it.curSpan.EndKey
	}
	// Check if the start of the span falls within the descriptor on which we're
	// already positioned. If so, and if the direction also corresponds, there's
	// no need to change the underlying iterator's state.
	if it.dir == oldDir && it.it.Valid() {
		if it.dir == kv.Ascending && oldSpan.ContainsKey(seekKey) {
			return
		}
		if it.dir == kv.Descending && oldSpan.ContainsExclusiveEndKey(seekKey) {
			return
		}
	}
	it.it.Seek(ctx, seekKey, scanDir)
}
Example 6: SplitRange
// SplitRange splits the range containing splitKey.
// The right range created by the split starts at the split key and extends to the
// original range's end key.
// Returns the new descriptors of the left and right ranges.
//
// splitKey must correspond to a SQL table key (it must end with a family ID /
// col ID).
func (ts *TestServer) SplitRange(
	splitKey roachpb.Key,
) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) {
	splitRKey, err := keys.Addr(splitKey)
	if err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, err
	}
	origRangeDesc, err := ts.LookupRange(splitKey)
	if err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{}, err
	}
	if origRangeDesc.StartKey.Equal(splitRKey) {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Errorf(
				"cannot split range %+v at start key %q", origRangeDesc, splitKey)
	}
	splitReq := roachpb.AdminSplitRequest{
		Span: roachpb.Span{
			Key: splitKey,
		},
		SplitKey: splitKey,
	}
	_, pErr := client.SendWrapped(context.Background(), ts.DistSender(), &splitReq)
	if pErr != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Errorf(
				"%q: split unexpected error: %s", splitReq.SplitKey, pErr)
	}
	var leftRangeDesc, rightRangeDesc roachpb.RangeDescriptor
	if err := ts.DB().GetProto(context.TODO(),
		keys.RangeDescriptorKey(origRangeDesc.StartKey), &leftRangeDesc); err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Wrap(err, "could not look up left-hand side descriptor")
	}
	// The split point might not be exactly the one we requested (it can be
	// adjusted slightly so we don't split in the middle of SQL rows). Update it
	// to the real point.
	splitRKey = leftRangeDesc.EndKey
	if err := ts.DB().GetProto(context.TODO(),
		keys.RangeDescriptorKey(splitRKey), &rightRangeDesc); err != nil {
		return roachpb.RangeDescriptor{}, roachpb.RangeDescriptor{},
			errors.Wrap(err, "could not look up right-hand side descriptor")
	}
	return leftRangeDesc, rightRangeDesc, nil
}
Example 7: runLsRanges
func runLsRanges(cmd *cobra.Command, args []string) error {
	if len(args) > 1 {
		return usageAndError(cmd)
	}
	var startKey roachpb.Key
	{
		k := roachpb.KeyMin.Next()
		if len(args) > 0 {
			k = roachpb.Key(args[0])
		}
		rk, err := keys.Addr(k)
		if err != nil {
			panic(err)
		}
		startKey = keys.RangeMetaKey(rk)
	}
	endKey := keys.Meta2Prefix.PrefixEnd()
	kvDB, stopper, err := MakeDBClient()
	if err != nil {
		return err
	}
	defer stopper.Stop()
	rows, err := kvDB.Scan(context.Background(), startKey, endKey, maxResults)
	if err != nil {
		return err
	}
	for _, row := range rows {
		desc := &roachpb.RangeDescriptor{}
		if err := row.ValueProto(desc); err != nil {
			return errors.Wrapf(err, "unable to unmarshal range descriptor at %s", row.Key)
		}
		fmt.Printf("%s-%s [%d]\n", desc.StartKey, desc.EndKey, desc.RangeID)
		for i, replica := range desc.Replicas {
			fmt.Printf("\t%d: node-id=%d store-id=%d\n",
				i, replica.NodeID, replica.StoreID)
		}
	}
	fmt.Printf("%d result(s)\n", len(rows))
	return nil
}
Example 8: checkEndTransactionTrigger
// checkEndTransactionTrigger verifies that an EndTransactionRequest
// that includes intents for the SystemDB keys sets the proper trigger.
func checkEndTransactionTrigger(args storagebase.FilterArgs) *roachpb.Error {
	req, ok := args.Req.(*roachpb.EndTransactionRequest)
	if !ok {
		return nil
	}
	if !req.Commit {
		// This is a rollback: skip trigger verification.
		return nil
	}
	modifiedSpanTrigger := req.InternalCommitTrigger.GetModifiedSpanTrigger()
	modifiedSystemConfigSpan := modifiedSpanTrigger != nil && modifiedSpanTrigger.SystemConfigSpan
	var hasSystemKey bool
	for _, span := range req.IntentSpans {
		keyAddr, err := keys.Addr(span.Key)
		if err != nil {
			return roachpb.NewError(err)
		}
		if bytes.Compare(keyAddr, keys.SystemConfigSpan.Key) >= 0 &&
			bytes.Compare(keyAddr, keys.SystemConfigSpan.EndKey) < 0 {
			hasSystemKey = true
			break
		}
	}
	// If the transaction in question has intents in the system span, then
	// modifiedSystemConfigSpan should always be true. However, it is possible
	// for modifiedSystemConfigSpan to be set, even though no system keys are
	// present. This can occur with certain conditional DDL statements (e.g.
	// "CREATE TABLE IF NOT EXISTS"), which set the SystemConfigTrigger
	// aggressively but may not actually end up changing the system DB depending
	// on the current state.
	// For more information, see the related comment at the beginning of
	// planner.makePlan().
	if hasSystemKey && !modifiedSystemConfigSpan {
		return roachpb.NewError(errors.Errorf("EndTransaction hasSystemKey=%t, but hasSystemConfigTrigger=%t",
			hasSystemKey, modifiedSystemConfigSpan))
	}
	return nil
}
Example 9: truncate
// truncate restricts all contained requests to the given key range
// and returns a new BatchRequest.
// All requests contained in that batch are "truncated" to the given
// span, inserting NoopRequest appropriately to replace requests which
// are left without a key range to operate on. The number of non-noop
// requests after truncation is returned.
func truncate(ba roachpb.BatchRequest, rs roachpb.RSpan) (roachpb.BatchRequest, int, error) {
	truncateOne := func(args roachpb.Request) (bool, roachpb.Span, error) {
		if _, ok := args.(*roachpb.NoopRequest); ok {
			return true, emptySpan, nil
		}
		header := args.Header()
		if !roachpb.IsRange(args) {
			// This is a point request.
			if len(header.EndKey) > 0 {
				return false, emptySpan, errors.Errorf("%T is not a range command, but EndKey is set", args)
			}
			keyAddr, err := keys.Addr(header.Key)
			if err != nil {
				return false, emptySpan, err
			}
			if !rs.ContainsKey(keyAddr) {
				return false, emptySpan, nil
			}
			return true, header, nil
		}
		// We're dealing with a range-spanning request.
		local := false
		keyAddr, err := keys.Addr(header.Key)
		if err != nil {
			return false, emptySpan, err
		}
		endKeyAddr, err := keys.Addr(header.EndKey)
		if err != nil {
			return false, emptySpan, err
		}
		if l, r := !keyAddr.Equal(header.Key), !endKeyAddr.Equal(header.EndKey); l || r {
			if !l || !r {
				return false, emptySpan, errors.Errorf("local key mixed with global key in range")
			}
			local = true
		}
		if keyAddr.Less(rs.Key) {
			// rs.Key can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.Key = rs.Key.AsRawKey()
			} else {
				// The local start key should be truncated to the boundary of local keys which
				// address to rs.Key.
				header.Key = keys.MakeRangeKeyPrefix(rs.Key)
			}
		}
		if !endKeyAddr.Less(rs.EndKey) {
			// rs.EndKey can't be local because it contains range split points, which
			// are never local.
			if !local {
				header.EndKey = rs.EndKey.AsRawKey()
			} else {
				// The local end key should be truncated to the boundary of local keys which
				// address to rs.EndKey.
				header.EndKey = keys.MakeRangeKeyPrefix(rs.EndKey)
			}
		}
		// Check whether the truncation has left any keys in the range. If not,
		// we need to cut it out of the request.
		if header.Key.Compare(header.EndKey) >= 0 {
			return false, emptySpan, nil
		}
		return true, header, nil
	}
	var numNoop int
	truncBA := ba
	truncBA.Requests = make([]roachpb.RequestUnion, len(ba.Requests))
	for pos, arg := range ba.Requests {
		hasRequest, newHeader, err := truncateOne(arg.GetInner())
		if !hasRequest {
			// We omit this one, i.e. replace it with a Noop.
			numNoop++
			union := roachpb.RequestUnion{}
			union.MustSetInner(&noopRequest)
			truncBA.Requests[pos] = union
		} else {
			// Keep the old one. If we must adjust the header, must copy.
			if inner := ba.Requests[pos].GetInner(); newHeader.Equal(inner.Header()) {
				truncBA.Requests[pos] = ba.Requests[pos]
			} else {
				shallowCopy := inner.ShallowCopy()
				shallowCopy.SetHeader(newHeader)
				union := &truncBA.Requests[pos] // avoid operating on copy
				union.MustSetInner(shallowCopy)
			}
		}
		if err != nil {
			return roachpb.BatchRequest{}, 0, err
		}
	}
	return truncBA, len(ba.Requests) - numNoop, nil
}
Example 10: partitionSpans
// partitionSpans finds out which nodes are owners for ranges touching the given
// spans, and splits the spans according to owning nodes. The result is a set of
// spanPartitions (one for each relevant node), which form a partitioning of the
// spans (i.e. they are non-overlapping and their union is exactly the original
// set of spans).
func (dsp *distSQLPlanner) partitionSpans(
	planCtx *planningCtx, spans roachpb.Spans,
) ([]spanPartition, error) {
	if len(spans) == 0 {
		panic("no spans")
	}
	ctx := planCtx.ctx
	splits := make([]spanPartition, 0, 1)
	// nodeMap maps a nodeID to an index inside the splits array.
	nodeMap := make(map[roachpb.NodeID]int)
	it := planCtx.spanIter
	for _, span := range spans {
		var rspan roachpb.RSpan
		var err error
		if rspan.Key, err = keys.Addr(span.Key); err != nil {
			return nil, err
		}
		if rspan.EndKey, err = keys.Addr(span.EndKey); err != nil {
			return nil, err
		}
		var lastNodeID roachpb.NodeID
		for it.Seek(ctx, span, kv.Ascending); ; it.Next(ctx) {
			if !it.Valid() {
				return nil, it.Error()
			}
			replInfo, err := it.ReplicaInfo(ctx)
			if err != nil {
				return nil, err
			}
			desc := it.Desc()
			var trimmedSpan roachpb.Span
			if rspan.Key.Less(desc.StartKey) {
				trimmedSpan.Key = desc.StartKey.AsRawKey()
			} else {
				trimmedSpan.Key = span.Key
			}
			if desc.EndKey.Less(rspan.EndKey) {
				trimmedSpan.EndKey = desc.EndKey.AsRawKey()
			} else {
				trimmedSpan.EndKey = span.EndKey
			}
			nodeID := replInfo.NodeDesc.NodeID
			idx, ok := nodeMap[nodeID]
			if !ok {
				idx = len(splits)
				splits = append(splits, spanPartition{node: nodeID})
				nodeMap[nodeID] = idx
				if _, ok := planCtx.nodeAddresses[nodeID]; !ok {
					planCtx.nodeAddresses[nodeID] = replInfo.NodeDesc.Address.String()
				}
			}
			split := &splits[idx]
			if lastNodeID == nodeID {
				// Two consecutive ranges on the same node, merge the spans.
				if !split.spans[len(split.spans)-1].EndKey.Equal(trimmedSpan.Key) {
					log.Fatalf(ctx, "expected consecutive span pieces %v %v", split.spans, trimmedSpan)
				}
				split.spans[len(split.spans)-1].EndKey = trimmedSpan.EndKey
			} else {
				split.spans = append(split.spans, trimmedSpan)
			}
			lastNodeID = nodeID
			if !it.NeedAnother() {
				break
			}
		}
	}
	return splits, nil
}
Example 11: TableStats
// TableStats is an endpoint that returns columns, indices, and other
// relevant details for the specified table.
func (s *adminServer) TableStats(
	ctx context.Context, req *serverpb.TableStatsRequest,
) (*serverpb.TableStatsResponse, error) {
	// Get table span.
	var tableSpan roachpb.Span
	iexecutor := sql.InternalExecutor{LeaseManager: s.server.leaseMgr}
	if err := s.server.db.Txn(ctx, func(txn *client.Txn) error {
		var err error
		tableSpan, err = iexecutor.GetTableSpan(s.getUser(req), txn, req.Database, req.Table)
		return err
	}); err != nil {
		return nil, s.serverError(err)
	}
	startKey, err := keys.Addr(tableSpan.Key)
	if err != nil {
		return nil, s.serverError(err)
	}
	endKey, err := keys.Addr(tableSpan.EndKey)
	if err != nil {
		return nil, s.serverError(err)
	}
	// Get current range descriptors for table. This is done by scanning over
	// meta2 keys for the range.
	rangeDescKVs, err := s.server.db.Scan(ctx, keys.RangeMetaKey(startKey), keys.RangeMetaKey(endKey), 0)
	if err != nil {
		return nil, s.serverError(err)
	}
	// Extract a list of node IDs from the response.
	nodeIDs := make(map[roachpb.NodeID]struct{})
	for _, kv := range rangeDescKVs {
		var rng roachpb.RangeDescriptor
		if err := kv.Value.GetProto(&rng); err != nil {
			return nil, s.serverError(err)
		}
		for _, repl := range rng.Replicas {
			nodeIDs[repl.NodeID] = struct{}{}
		}
	}
	// Construct TableStatsResponse by sending an RPC to every node involved.
	tableStatResponse := serverpb.TableStatsResponse{
		NodeCount:  int64(len(nodeIDs)),
		RangeCount: int64(len(rangeDescKVs)),
	}
	type nodeResponse struct {
		nodeID roachpb.NodeID
		resp   *serverpb.SpanStatsResponse
		err    error
	}
	// Send a SpanStats query to each node. Set a timeout on the context for
	// these queries.
	responses := make(chan nodeResponse)
	nodeCtx, cancel := context.WithTimeout(ctx, base.NetworkTimeout)
	defer cancel()
	for nodeID := range nodeIDs {
		nodeID := nodeID
		if err := s.server.stopper.RunAsyncTask(nodeCtx, func(ctx context.Context) {
			var spanResponse *serverpb.SpanStatsResponse
			client, err := s.server.status.dialNode(nodeID)
			if err == nil {
				req := serverpb.SpanStatsRequest{
					StartKey: startKey,
					EndKey:   endKey,
					NodeID:   nodeID.String(),
				}
				spanResponse, err = client.SpanStats(ctx, &req)
			}
			response := nodeResponse{
				nodeID: nodeID,
				resp:   spanResponse,
				err:    err,
			}
			select {
			case responses <- response:
				// Response processed.
			case <-ctx.Done():
				// Context completed, response no longer needed.
			}
		}); err != nil {
			return nil, err
		}
	}
	for remainingResponses := len(nodeIDs); remainingResponses > 0; remainingResponses-- {
		select {
		case resp := <-responses:
			// For nodes which returned an error, note that the node's data
			// is missing. For successful calls, aggregate statistics.
			if resp.err != nil {
				tableStatResponse.MissingNodes = append(
					tableStatResponse.MissingNodes,
					serverpb.TableStatsResponse_MissingNode{
						NodeID:       resp.nodeID.String(),
						ErrorMessage: resp.err.Error(),
//......... the remainder of this example is omitted .........
Example 12: TestLeaseExtensionNotBlockedByRead
// Test that a lease extension (a RequestLeaseRequest that doesn't change the
// lease holder) is not blocked by ongoing reads.
// The test relies on two things:
// 1) Lease extensions, unlike lease transfers, are not blocked by reads through their
// PostCommitTrigger.noConcurrentReads.
// 2) Requests with the non-KV flag, such as RequestLeaseRequest, do not
// go through the command queue.
func TestLeaseExtensionNotBlockedByRead(t *testing.T) {
	defer leaktest.AfterTest(t)()
	readBlocked := make(chan struct{})
	cmdFilter := func(fArgs storagebase.FilterArgs) *roachpb.Error {
		if fArgs.Hdr.UserPriority == 42 {
			// Signal that the read is blocked.
			readBlocked <- struct{}{}
			// Wait for read to be unblocked.
			<-readBlocked
		}
		return nil
	}
	srv, _, _ := serverutils.StartServer(t,
		base.TestServerArgs{
			Knobs: base.TestingKnobs{
				Store: &storage.StoreTestingKnobs{
					TestingCommandFilter: cmdFilter,
				},
			},
		})
	s := srv.(*server.TestServer)
	defer s.Stopper().Stop()
	// Start a read and wait for it to block.
	key := roachpb.Key("a")
	errChan := make(chan error)
	go func() {
		getReq := roachpb.GetRequest{
			Span: roachpb.Span{
				Key: key,
			},
		}
		if _, pErr := client.SendWrappedWith(context.Background(), s.DistSender(),
			roachpb.Header{UserPriority: 42},
			&getReq); pErr != nil {
			errChan <- pErr.GoError()
		}
	}()
	select {
	case err := <-errChan:
		t.Fatal(err)
	case <-readBlocked:
		// Send the lease request.
		rKey, err := keys.Addr(key)
		if err != nil {
			t.Fatal(err)
		}
		_, repDesc, err := s.Stores().LookupReplica(rKey, nil)
		if err != nil {
			t.Fatal(err)
		}
		leaseReq := roachpb.RequestLeaseRequest{
			Span: roachpb.Span{
				Key: key,
			},
			Lease: roachpb.Lease{
				Start:       s.Clock().Now(),
				StartStasis: s.Clock().Now().Add(time.Second.Nanoseconds(), 0),
				Expiration:  s.Clock().Now().Add(2*time.Second.Nanoseconds(), 0),
				Replica:     repDesc,
			},
		}
		if _, pErr := client.SendWrapped(context.Background(), s.DistSender(), &leaseReq); pErr != nil {
			t.Fatal(pErr)
		}
		// Unblock the read.
		readBlocked <- struct{}{}
	}
}
Example 13: meta
func meta(k roachpb.RKey) (roachpb.RKey, error) {
	return keys.Addr(keys.RangeMetaKey(k))
}