本文整理汇总了Golang中github.com/cockroachdb/cockroach/pkg/internal/client.DB类的典型用法代码示例。如果您正苦于以下问题:Golang DB类的具体用法?Golang DB怎么用?Golang DB使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DB类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: WriteStatusSummary
// WriteStatusSummary generates a summary and immediately writes it to the given
// client.
func (mr *MetricsRecorder) WriteStatusSummary(ctx context.Context, db *client.DB) error {
	mr.writeSummaryMu.Lock()
	defer mr.writeSummaryMu.Unlock()
	nodeStatus := mr.GetStatusSummary()
	if nodeStatus != nil {
		key := keys.NodeStatusKey(nodeStatus.Desc.NodeID)
		// We use PutInline to store only a single version of the node status.
		// There's not much point in keeping the historical versions as we keep
		// all of the constituent data as timeseries. Further, due to the size
		// of the build info in the node status, writing one of these every 10s
		// will generate more versions than will easily fit into a range over
		// the course of a day.
		if err := db.PutInline(ctx, key, nodeStatus); err != nil {
			return err
		}
		if log.V(2) {
			statusJSON, err := json.Marshal(nodeStatus)
			if err != nil {
				log.Errorf(ctx, "error marshaling nodeStatus to json: %s", err)
			} else {
				// Only log the payload when marshaling succeeded; previously a
				// nil statusJSON was logged right after reporting the error.
				log.Infof(ctx, "node %d status: %s", nodeStatus.Desc.NodeID, statusJSON)
			}
		}
	}
	return nil
}
示例2: countRangeReplicas
// countRangeReplicas returns the number of replicas recorded in the range
// descriptor anchored at RKeyMin (the first range).
func countRangeReplicas(db *client.DB) (int, error) {
	var desc roachpb.RangeDescriptor
	descKey := keys.RangeDescriptorKey(roachpb.RKeyMin)
	if err := db.GetProto(context.TODO(), descKey, &desc); err != nil {
		return 0, err
	}
	return len(desc.Replicas), nil
}
示例3: allocateNodeID
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id.
func allocateNodeID(ctx context.Context, db *client.DB) (roachpb.NodeID, error) {
	// The generator key is a monotonically increasing counter; its
	// post-increment value is the freshly allocated ID.
	res, err := db.Inc(ctx, keys.NodeIDGenerator, 1)
	if err != nil {
		return 0, errors.Errorf("unable to allocate node ID: %s", err)
	}
	newID := roachpb.NodeID(res.ValueInt())
	return newID, nil
}
示例4: checkKeyCount
// checkKeyCount scans [prefix, prefix.PrefixEnd()) and fails the test if the
// number of key/value pairs found does not equal numKeys.
func checkKeyCount(t *testing.T, kvDB *client.DB, prefix roachpb.Key, numKeys int) {
	kvs, err := kvDB.Scan(context.TODO(), prefix, prefix.PrefixEnd(), 0)
	if err != nil {
		t.Fatal(err)
	}
	// The original aliased numKeys to a local `l` for no reason; compare
	// against the parameter directly.
	if len(kvs) != numKeys {
		t.Fatalf("expected %d key value pairs, but got %d", numKeys, len(kvs))
	}
}
示例5: truncateTableInChunks
// truncateTableInChunks truncates the data of a table in chunks. It deletes a
// range of data for the table, which includes the PK and all indexes.
func truncateTableInChunks(
	ctx context.Context, tableDesc *sqlbase.TableDescriptor, db *client.DB,
) error {
	const chunkSize = TableTruncateChunkSize
	// resume carries the span where the previous chunk's deletion stopped;
	// a nil resume.Key after a transaction means the table is fully cleared.
	var resume roachpb.Span
	for row := 0; ; row += chunkSize {
		startAt := resume
		if log.V(2) {
			log.Infof(ctx, "table %s truncate at row: %d, span: %s", tableDesc.Name, row, resume)
		}
		// Delete one chunk per transaction to bound transaction size.
		err := db.Txn(ctx, func(txn *client.Txn) error {
			rowDeleter, err := makeRowDeleter(txn, tableDesc, nil, nil, false)
			if err != nil {
				return err
			}
			deleter := tableDeleter{rd: rowDeleter}
			if err := deleter.init(txn); err != nil {
				return err
			}
			resume, err = deleter.deleteAllRows(txn.Context, startAt, chunkSize)
			return err
		})
		if err != nil {
			return err
		}
		if resume.Key == nil {
			return nil
		}
	}
}
示例6: pushTxn
// pushTxn attempts to abort the given transaction via a PushTxn request.
// NOTE(review): the original comment claimed "the wait group is signaled on
// completion", but no wait group appears in this function — presumably the
// caller handles that; confirm at the call site.
func pushTxn(
	ctx context.Context,
	db *client.DB,
	now hlc.Timestamp,
	txn *roachpb.Transaction,
	typ roachpb.PushTxnType,
) {
	// Push the transaction which created the intent, using a maximum-priority
	// pusher so the push succeeds whenever possible.
	req := &roachpb.PushTxnRequest{
		Span:      roachpb.Span{Key: txn.Key},
		Now:       now,
		PusherTxn: roachpb.Transaction{TxnMeta: enginepb.TxnMeta{Priority: math.MaxInt32}},
		PusheeTxn: txn.TxnMeta,
		PushType:  typ,
	}
	batch := &client.Batch{}
	batch.AddRawRequest(req)
	if err := db.Run(ctx, batch); err != nil {
		log.Warningf(ctx, "push of txn %s failed: %s", txn, err)
		return
	}
	// Update the supplied txn on successful push.
	resp := batch.RawResponse().Responses[0].GetInner().(*roachpb.PushTxnResponse)
	*txn = resp.PusheeTxn
}
示例7: allocateStoreIDs
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate "inc" new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(
	ctx context.Context, nodeID roachpb.NodeID, inc int64, db *client.DB,
) (roachpb.StoreID, error) {
	res, err := db.Inc(ctx, keys.StoreIDGenerator, inc)
	if err != nil {
		return 0, errors.Errorf("unable to allocate %d store IDs for node %d: %s", inc, nodeID, err)
	}
	// The generator's post-increment value is the last ID in the allocated
	// block; step back inc-1 to yield the first.
	firstID := res.ValueInt() - inc + 1
	return roachpb.StoreID(firstID), nil
}
示例8: runTxn
// runTxn executes the given command list as one logical transaction of the
// history, driving it through the client retry loop. An unexpected retry is
// surfaced as a *retryError so the history enumerator can explore the
// alternative interleavings.
func (hv *historyVerifier) runTxn(
	txnIdx int,
	priority int32,
	isolation enginepb.IsolationType,
	cmds []*cmd,
	db *client.DB,
	t *testing.T,
) error {
	var retry int
	txnName := fmt.Sprintf("txn %d", txnIdx+1)
	// cmdIdx is the index of the last command attempted; -1 means none yet.
	// It persists across retries of the closure below.
	cmdIdx := -1
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		// If this is 2nd attempt, and a retry wasn't expected, return a
		// retry error which results in further histories being enumerated.
		if retry++; retry > 1 {
			if !cmds[cmdIdx].expRetry {
				// Propagate retry error to history execution to enumerate all
				// histories where this txn retries at this command.
				return &retryError{txnIdx: txnIdx, cmdIdx: cmdIdx}
			}
			// We're expecting a retry, so just send nil down the done channel.
			cmds[cmdIdx].done(nil)
		}
		txn.SetDebugName(txnName, 0)
		if isolation == enginepb.SNAPSHOT {
			if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {
				return err
			}
		}
		txn.InternalSetPriority(priority)
		// env is rebuilt per attempt so stale values from an aborted attempt
		// don't leak into the retry.
		env := map[string]int64{}
		for cmdIdx+1 < len(cmds) {
			cmdIdx++
			cmds[cmdIdx].env = env
			_, err := hv.runCmd(txn, txnIdx, retry, cmds[cmdIdx], t)
			if err != nil {
				if log.V(1) {
					log.Infof(context.Background(), "%s: failed running %s: %s", txnName, cmds[cmdIdx], err)
				}
				return err
			}
		}
		return nil
	})
	if err != nil {
		// Signal the failure to the command that errored and every command
		// after it, so anything waiting on their done channels is released.
		for _, c := range cmds[cmdIdx:] {
			c.done(err)
		}
	}
	return err
}
示例9: getRangeKeys
// getRangeKeys returns the end keys of all ranges.
func getRangeKeys(db *client.DB) ([]roachpb.Key, error) {
	rows, err := db.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0)
	if err != nil {
		return nil, err
	}
	// make([]T, n, n) is redundant (staticcheck S1019): the capacity defaults
	// to the length.
	ret := make([]roachpb.Key, len(rows))
	for i, row := range rows {
		// Meta2 entries are keyed by Meta2Prefix + range end key; strip the
		// prefix to recover the end key.
		ret[i] = bytes.TrimPrefix(row.Key, keys.Meta2Prefix)
	}
	return ret, nil
}
示例10: startTestWriter
// startTestWriter creates a writer which initiates a sequence of
// transactions, each which writes up to 10 times to random keys with
// random values. If not nil, txnChannel is written to non-blockingly
// every time a new transaction starts. Runs until done is closed; if wg is
// not nil, wg.Done is called on exit.
func startTestWriter(
	db *client.DB,
	i int64,
	valBytes int32,
	wg *sync.WaitGroup,
	retries *int32,
	txnChannel chan struct{},
	done <-chan struct{},
	t *testing.T,
) {
	src := rand.New(rand.NewSource(i))
	defer func() {
		if wg != nil {
			wg.Done()
		}
	}()
	// The original outer loop declared a counter `j` that was never used and
	// was shadowed by the inner write loop's `j`; both are cleaned up here.
	for {
		select {
		case <-done:
			return
		default:
			first := true
			err := db.Txn(context.TODO(), func(txn *client.Txn) error {
				if first && txnChannel != nil {
					// Announce the new transaction without blocking.
					select {
					case txnChannel <- struct{}{}:
					default:
					}
				} else if !first && retries != nil {
					// The closure re-runs on txn retry; count it.
					atomic.AddInt32(retries, 1)
				}
				first = false
				for k := 0; k <= int(src.Int31n(10)); k++ {
					key := randutil.RandBytes(src, 10)
					val := randutil.RandBytes(src, int(src.Int31n(valBytes)))
					if err := txn.Put(key, val); err != nil {
						log.Infof(context.Background(), "experienced an error in routine %d: %s", i, err)
						return err
					}
				}
				return nil
			})
			if err != nil {
				t.Error(err)
			} else {
				// Brief pause between successful transactions to avoid a
				// tight write loop.
				time.Sleep(1 * time.Millisecond)
			}
		}
	}
}
示例11: WaitForInitialSplits
// WaitForInitialSplits waits for the expected number of initial ranges to be
// populated in the meta2 table. If the expected range count is not reached
// within a configured timeout, an error is returned.
func WaitForInitialSplits(db *client.DB) error {
	want := ExpectedInitialRangeCount()
	return util.RetryForDuration(initialSplitsTimeout, func() error {
		// Scan all keys in the Meta2Prefix; we only need a count.
		rows, err := db.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return err
		}
		got := len(rows)
		if got != want {
			return errors.Errorf("had %d ranges at startup, expected %d", got, want)
		}
		return nil
	})
}
示例12: purgeOldLeases
// purgeOldLeases refreshes the leases on a table. Unused leases older than
// minVersion will be released.
// If deleted is set, minVersion is ignored; no lease is acquired and all
// existing unused leases are released. The table is further marked for
// deletion, which will cause existing in-use leases to be eagerly released once
// they're not in use any more.
// If t has no active leases, nothing is done.
func (t *tableState) purgeOldLeases(
	db *client.DB, deleted bool, minVersion sqlbase.DescriptorVersion, store LeaseStore,
) error {
	// Snapshot the emptiness check under the lock, then release it before the
	// transaction below (which must not run while holding t.mu).
	t.mu.Lock()
	empty := len(t.active.data) == 0
	t.mu.Unlock()
	if empty {
		// We don't currently have a lease on this table, so no need to refresh
		// anything.
		return nil
	}
	// Acquire and release a lease on the table at a version >= minVersion.
	var lease *LeaseState
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		var err error
		if !deleted {
			lease, err = t.acquire(txn, minVersion, store)
			// Discovering the table was dropped flips us into the "deleted"
			// path below, releasing everything.
			if err == errTableDropped {
				deleted = true
			}
		}
		if err == nil || deleted {
			t.mu.Lock()
			defer t.mu.Unlock()
			var toRelease []*LeaseState
			if deleted {
				t.deleted = true
			}
			// Copy the active lease list so releaseLeasesIfNotActive operates
			// on a stable snapshot.
			toRelease = append([]*LeaseState(nil), t.active.data...)
			t.releaseLeasesIfNotActive(toRelease, store)
			return nil
		}
		return err
	})
	if err != nil {
		return err
	}
	if lease == nil {
		return nil
	}
	// Drop the reference taken by the acquire above.
	return t.release(lease, store)
}
示例13: LeaseInfo
// LeaseInfo runs a LeaseInfoRequest using the specified server.
func LeaseInfo(
	t *testing.T,
	db *client.DB,
	rangeDesc roachpb.RangeDescriptor,
	readConsistency roachpb.ReadConsistencyType,
) roachpb.LeaseInfoResponse {
	// Address the request to the range via its start key.
	req := &roachpb.LeaseInfoRequest{
		Span: roachpb.Span{Key: rangeDesc.StartKey.AsRawKey()},
	}
	header := roachpb.Header{ReadConsistency: readConsistency}
	reply, pErr := client.SendWrappedWith(context.Background(), db.GetSender(), header, req)
	if pErr != nil {
		t.Fatal(pErr)
	}
	return *(reply.(*roachpb.LeaseInfoResponse))
}
示例14: pruneTimeSeries
// pruneTimeSeries will prune data for the supplied set of time series. Time
// series series are identified by name and resolution.
//
// For each time series supplied, the pruning operation will delete all data
// older than a constant threshold. The threshold is different depending on the
// resolution; typically, lower-resolution time series data will be retained for
// a longer period.
//
// If data is stored at a resolution which is not known to the system, it is
// assumed that the resolution has been deprecated and all data for that time
// series at that resolution will be deleted.
//
// As range deletion of inline data is an idempotent operation, it is safe to
// run this operation concurrently on multiple nodes at the same time.
func pruneTimeSeries(
	ctx context.Context, db *client.DB, timeSeriesList []timeSeriesResolutionInfo, now hlc.Timestamp,
) error {
	thresholds := computeThresholds(now.WallTime)
	for _, series := range timeSeriesList {
		// Data for one name/resolution pair occupies a contiguous key range
		// and can therefore be removed with a single DelRange. The start key
		// is the prefix unique to this pair.
		start := makeDataKeySeriesPrefix(series.Name, series.Resolution)
		// The end key is the time series key at the retention threshold for
		// this resolution; an unknown (deprecated) resolution is cleared
		// entirely via the prefix's PrefixEnd.
		var end roachpb.Key
		if threshold, ok := thresholds[series.Resolution]; ok {
			end = MakeDataKey(series.Name, "", series.Resolution, threshold)
		} else {
			end = start.PrefixEnd()
		}
		// TODO(mrtracy): There is no reason not to execute the individual
		// deletes in parallel, although the best way to do that is not clear.
		// See the RFC PR #9343 for details.
		batch := &client.Batch{}
		batch.AddRawRequest(&roachpb.DeleteRangeRequest{
			Span: roachpb.Span{
				Key:    start,
				EndKey: end,
			},
			Inline: true,
		})
		if err := db.Run(ctx, batch); err != nil {
			return err
		}
	}
	return nil
}
示例15: runCmds
// runCmds executes all commands in a single transaction, returning the
// formatted trace of the commands, the shared environment they populated, and
// any error from the transaction.
func (hv *historyVerifier) runCmds(
	cmds []*cmd, db *client.DB, t *testing.T,
) (string, map[string]int64, error) {
	env := map[string]int64{}
	var parts []string
	err := db.Txn(context.TODO(), func(txn *client.Txn) error {
		for _, c := range cmds {
			c.historyIdx = hv.idx
			c.env = env
			c.init(nil)
			format, err := c.execute(txn, t)
			if err != nil {
				return err
			}
			parts = append(parts, fmt.Sprintf(format, 0, 0))
		}
		return nil
	})
	return strings.Join(parts, " "), env, err
}