This article collects typical usage examples of the Go type Span from github.com/cockroachdb/cockroach/pkg/roachpb. If you are wondering what roachpb.Span is for, or how and where it is used, the curated examples below may help.
Four code examples of the Span type are shown below, sorted by popularity by default.
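Before the examples, here is a minimal, self-contained sketch of what a roachpb.Span is: a half-open key interval [Key, EndKey). The construction mirrors the pattern used in Example 1 (PrefixEnd turns a key prefix into the end of its key range); the key literal and variable names are illustrative only.

package main

import (
	"fmt"

	"github.com/cockroachdb/cockroach/pkg/roachpb"
)

func main() {
	// A Span is a half-open interval of keys: [Key, EndKey).
	prefix := roachpb.Key("some/key/prefix")
	span := roachpb.Span{
		Key:    prefix,
		EndKey: prefix.PrefixEnd(), // smallest key greater than every key with this prefix
	}
	fmt.Println(span.Key, span.EndKey)

	// A zero-valued Span (nil Key) is commonly used to mean "no span" or
	// "start from the beginning", as in Example 1 below.
	var empty roachpb.Span
	fmt.Println(empty.Key == nil) // true
}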
Example 1: deleteAllRowsScan
func (td *tableDeleter) deleteAllRowsScan(
	ctx context.Context, resume roachpb.Span, limit int64,
) (roachpb.Span, error) {
	if resume.Key == nil {
		tablePrefix := sqlbase.MakeIndexKeyPrefix(
			td.rd.helper.tableDesc, td.rd.helper.tableDesc.PrimaryIndex.ID)
		resume = roachpb.Span{Key: roachpb.Key(tablePrefix), EndKey: roachpb.Key(tablePrefix).PrefixEnd()}
	}

	valNeededForCol := make([]bool, len(td.rd.helper.tableDesc.Columns))
	for _, idx := range td.rd.fetchColIDtoRowIndex {
		valNeededForCol[idx] = true
	}

	var rf sqlbase.RowFetcher
	err := rf.Init(
		td.rd.helper.tableDesc, td.rd.fetchColIDtoRowIndex, &td.rd.helper.tableDesc.PrimaryIndex,
		false, false, td.rd.fetchCols, valNeededForCol)
	if err != nil {
		return resume, err
	}
	if err := rf.StartScan(td.txn, roachpb.Spans{resume}, true /* limit batches */, 0); err != nil {
		return resume, err
	}

	for i := int64(0); i < limit; i++ {
		row, err := rf.NextRowDecoded()
		if err != nil {
			return resume, err
		}
		if row == nil {
			// Done deleting all rows.
			resume = roachpb.Span{}
			break
		}
		_, err = td.row(ctx, row)
		if err != nil {
			return resume, err
		}
	}
	if resume.Key != nil {
		// Update the resume start key for the next iteration.
		resume.Key = rf.Key()
	}
	return resume, td.finalize(ctx)
}
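deleteAllRowsScan deletes at most limit rows and returns a resume span: a non-empty span means there is more to delete starting at that key, while a zero Span signals completion. Below is a minimal sketch of a driver loop built on that contract; the function name, parameter shape, and chunk size are hypothetical and not part of CockroachDB.

// deleteInChunks repeatedly invokes a chunked delete function of the same
// shape as deleteAllRowsScan until it reports an empty resume span.
func deleteInChunks(
	ctx context.Context,
	del func(context.Context, roachpb.Span, int64) (roachpb.Span, error),
) error {
	var resume roachpb.Span // zero value: start from the beginning of the table
	for {
		var err error
		resume, err = del(ctx, resume, 1000 /* rows per chunk, illustrative */)
		if err != nil {
			return err
		}
		if resume.Key == nil {
			return nil // nothing left to delete
		}
	}
}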
Example 2: partitionSpans
// partitionSpans finds out which nodes are owners for ranges touching the given
// spans, and splits the spans according to owning nodes. The result is a set of
// spanPartitions (one for each relevant node), which form a partitioning of the
// spans (i.e. they are non-overlapping and their union is exactly the original
// set of spans).
func (dsp *distSQLPlanner) partitionSpans(
	planCtx *planningCtx, spans roachpb.Spans,
) ([]spanPartition, error) {
	if len(spans) == 0 {
		panic("no spans")
	}
	ctx := planCtx.ctx
	splits := make([]spanPartition, 0, 1)
	// nodeMap maps a nodeID to an index inside the splits array.
	nodeMap := make(map[roachpb.NodeID]int)
	it := planCtx.spanIter
	for _, span := range spans {
		var rspan roachpb.RSpan
		var err error
		if rspan.Key, err = keys.Addr(span.Key); err != nil {
			return nil, err
		}
		if rspan.EndKey, err = keys.Addr(span.EndKey); err != nil {
			return nil, err
		}

		var lastNodeID roachpb.NodeID
		for it.Seek(ctx, span, kv.Ascending); ; it.Next(ctx) {
			if !it.Valid() {
				return nil, it.Error()
			}
			replInfo, err := it.ReplicaInfo(ctx)
			if err != nil {
				return nil, err
			}
			desc := it.Desc()

			var trimmedSpan roachpb.Span
			if rspan.Key.Less(desc.StartKey) {
				trimmedSpan.Key = desc.StartKey.AsRawKey()
			} else {
				trimmedSpan.Key = span.Key
			}
			if desc.EndKey.Less(rspan.EndKey) {
				trimmedSpan.EndKey = desc.EndKey.AsRawKey()
			} else {
				trimmedSpan.EndKey = span.EndKey
			}

			nodeID := replInfo.NodeDesc.NodeID
			idx, ok := nodeMap[nodeID]
			if !ok {
				idx = len(splits)
				splits = append(splits, spanPartition{node: nodeID})
				nodeMap[nodeID] = idx
				if _, ok := planCtx.nodeAddresses[nodeID]; !ok {
					planCtx.nodeAddresses[nodeID] = replInfo.NodeDesc.Address.String()
				}
			}
			split := &splits[idx]

			if lastNodeID == nodeID {
				// Two consecutive ranges on the same node, merge the spans.
				if !split.spans[len(split.spans)-1].EndKey.Equal(trimmedSpan.Key) {
					log.Fatalf(ctx, "expected consecutive span pieces %v %v", split.spans, trimmedSpan)
				}
				split.spans[len(split.spans)-1].EndKey = trimmedSpan.EndKey
			} else {
				split.spans = append(split.spans, trimmedSpan)
			}

			lastNodeID = nodeID
			if !it.NeedAnother() {
				break
			}
		}
	}
	return splits, nil
}
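The interesting detail above is the merge step: when two consecutive ranges of a span live on the same node, the trimmed pieces are fused back into a single span so each partition stays minimal. Below is a small, self-contained sketch of that coalescing logic in isolation; the piece type and coalesce function are hypothetical illustrations, not CockroachDB APIs.

// piece is one range-sized slice of an original span, tagged with the node
// that owns the range.
type piece struct {
	node roachpb.NodeID
	span roachpb.Span
}

// coalesce groups pieces by node and merges consecutive pieces that land on
// the same node, mirroring the merge branch in partitionSpans above.
func coalesce(pieces []piece) map[roachpb.NodeID][]roachpb.Span {
	out := make(map[roachpb.NodeID][]roachpb.Span)
	var lastNode roachpb.NodeID
	for _, p := range pieces {
		spans := out[p.node]
		if p.node == lastNode && len(spans) > 0 && spans[len(spans)-1].EndKey.Equal(p.span.Key) {
			// Consecutive ranges on the same node: extend the previous span.
			spans[len(spans)-1].EndKey = p.span.EndKey
		} else {
			spans = append(spans, p.span)
		}
		out[p.node] = spans
		lastNode = p.node
	}
	return out
}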
Example 3: TestTableReader
func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// Create a table where each row is:
	//
	//  | a        | b        | sum                 | s                   |
	//  |----------|----------|---------------------|---------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |
	aFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row / 10))
	}
	bFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row % 10))
	}
	sumFn := func(row int) parser.Datum {
		return parser.NewDInt(parser.DInt(row/10 + row%10))
	}
	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))
	td := sqlbase.GetTableDescriptor(kvDB, "test", "t")

	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(td, td.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	testCases := []struct {
		spec     TableReaderSpec
		expected string
	}{
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3
				OutputColumns: []uint32{0, 1},
			},
			expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]",
		},
		{
			spec: TableReaderSpec{
				Filter:        Expression{Expr: "@3 < 5 AND @2 != 3"},
				OutputColumns: []uint32{3}, // s
				HardLimit:     4,
			},
			expected: "[['one'] ['two'] ['four'] ['one-zero']]",
		},
		{
			spec: TableReaderSpec{
				IndexIdx:      1,
				Reverse:       true,
				Spans:         []TableReaderSpan{makeIndexSpan(4, 6)},
				Filter:        Expression{Expr: "@1 < 3"}, // sum < 8
				OutputColumns: []uint32{0, 1},
				SoftLimit:     1,
			},
			expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]",
		},
	}

	for _, c := range testCases {
		ts := c.spec
		ts.Table = *td
		flowCtx := FlowCtx{
			Context:  context.Background(),
			evalCtx:  &parser.EvalContext{},
			txnProto: &roachpb.Transaction{},
			clientDB: kvDB,
		}

		out := &RowBuffer{}
		tr, err := newTableReader(&flowCtx, &ts, out)
		if err != nil {
			t.Fatal(err)
		}
		tr.Run(nil)
		if out.err != nil {
			t.Fatal(out.err)
		}
		if !out.closed {
			t.Fatalf("output RowReceiver not closed")
		}
		if result := out.rows.String(); result != c.expected {
			t.Errorf("invalid results: %s, expected %s", result, c.expected)
		}
	}
}
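As a cross-check of the first expected string above: the filter "@3 < 5 AND @2 != 3" reads "sum < 5 and b != 3" over the columns (a, b, sum, s), and CreateTable generates rows 1 through 99. A hypothetical standalone helper (not part of the test) that recomputes that result from the row-generation rules:

// expectedFirstCase recomputes the expected output of the first test case:
// rows 1..99 with a=row/10, b=row%10, kept when a+b < 5 and b != 3,
// projected to (a, b) in primary-key order.
func expectedFirstCase() [][2]int {
	var out [][2]int
	for row := 1; row <= 99; row++ {
		a, b := row/10, row%10
		if a+b < 5 && b != 3 {
			out = append(out, [2]int{a, b})
		}
	}
	return out // [[0 1] [0 2] [0 4] [1 0] ... [4 0]], matching the expected string
}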
Example 4: TestClusterFlow
func TestClusterFlow(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const numRows = 100

	args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual}
	tc := serverutils.StartTestCluster(t, 3, args)
	defer tc.Stopper().Stop()

	sumDigitsFn := func(row int) parser.Datum {
		sum := 0
		for row > 0 {
			sum += row % 10
			row /= 10
		}
		return parser.NewDInt(parser.DInt(sum))
	}

	sqlutils.CreateTable(t, tc.ServerConn(0), "t",
		"num INT PRIMARY KEY, digitsum INT, numstr STRING, INDEX s (digitsum)",
		numRows,
		sqlutils.ToRowFn(sqlutils.RowIdxFn, sumDigitsFn, sqlutils.RowEnglishFn))

	kvDB := tc.Server(0).KVClient().(*client.DB)
	desc := sqlbase.GetTableDescriptor(kvDB, "test", "t")
	makeIndexSpan := func(start, end int) TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(desc, desc.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return TableReaderSpan{Span: span}
	}

	// Set up table readers on three hosts feeding data into a join reader on
	// the third host. This is a basic test for the distributed flow
	// infrastructure, including local and remote streams.
	//
	// Note that the ranges won't necessarily be local to the table readers, but
	// that doesn't matter for the purposes of this test.

	// Start a span (useful to look at spans using Lightstep).
	sp, err := tracing.JoinOrNew(tracing.NewTracer(), nil, "cluster test")
	if err != nil {
		t.Fatal(err)
	}
	ctx := opentracing.ContextWithSpan(context.Background(), sp)
	defer sp.Finish()

	tr1 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(0, 8)},
	}

	tr2 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(8, 12)},
	}

	tr3 := TableReaderSpec{
		Table:         *desc,
		IndexIdx:      1,
		OutputColumns: []uint32{0, 1},
		Spans:         []TableReaderSpan{makeIndexSpan(12, 100)},
	}

	jr := JoinReaderSpec{
		Table:         *desc,
		OutputColumns: []uint32{2},
	}

	txn := client.NewTxn(ctx, *kvDB)
	fid := FlowID{uuid.MakeV4()}

	req1 := &SetupFlowRequest{Txn: txn.Proto}
	req1.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr1},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 0, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
				},
			}},
		}},
	}

	req2 := &SetupFlowRequest{Txn: txn.Proto}
	req2.Flow = FlowSpec{
		FlowID: fid,
		Processors: []ProcessorSpec{{
			Core: ProcessorCoreUnion{TableReader: &tr2},
			Output: []OutputRouterSpec{{
				Type: OutputRouterSpec_MIRROR,
				Streams: []StreamEndpointSpec{
					{StreamID: 1, Mailbox: &MailboxSpec{TargetAddr: tc.Server(2).ServingAddr()}},
//......... the remainder of this example is omitted .........
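Both test examples build index spans the same way: the start key is the index prefix plus the ascending-varint encoding of start, and the end key is the prefix plus the encoding of end, so the resulting Span covers index values in the half-open interval [start, end). A standalone sketch of that idea follows; the indexSpan name is hypothetical.

// indexSpan builds a Span over a secondary index covering values in [start, end).
// prefix is the key prefix of the index (e.g. from sqlbase.MakeIndexKeyPrefix).
func indexSpan(prefix []byte, start, end int64) roachpb.Span {
	return roachpb.Span{
		Key:    roachpb.Key(encoding.EncodeVarintAscending(append([]byte(nil), prefix...), start)),
		EndKey: roachpb.Key(encoding.EncodeVarintAscending(append([]byte(nil), prefix...), end)),
	}
}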