This article collects typical usage examples of the Golang function client.DefaultDBContext from github.com/cockroachdb/cockroach/internal/client. If you have been asking yourself what DefaultDBContext does, how to call it, or where to find working examples, the curated samples below should help.
Ten code examples of DefaultDBContext are shown below, sorted by popularity.
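All ten examples share the same basic pattern: DefaultDBContext returns a DBContext populated with defaults, the caller overrides only the fields it cares about, and the result is handed to client.NewDBWithContext together with a Sender. Here is a minimal sketch of that pattern; the newTestDB helper name and the UserPriority tweak are illustrative, not part of the library.

// newTestDB is a hypothetical helper showing the shared pattern:
// start from the defaults, override selectively, construct the DB.
func newTestDB(sender client.Sender) *client.DB {
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = roachpb.NormalUserPriority // override only what you need
	return client.NewDBWithContext(sender, dbCtx)
}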
Example 1: createTestNotifyClient
// createTestNotifyClient creates a new client which connects using an HTTP
// sender to the server at addr. It contains a waitgroup to allow waiting.
func createTestNotifyClient(t *testing.T, stopper *stop.Stopper, addr string, priority roachpb.UserPriority) (*client.DB, *notifyingSender) {
	db := createTestClient(t, stopper, addr)
	sender := &notifyingSender{wrapped: db.GetSender()}
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = priority
	return client.NewDBWithContext(sender, dbCtx), sender
}
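The notifyingSender type is defined elsewhere in the same test file; the comment above tells us only that it wraps another sender and carries a waitgroup for synchronization. A plausible sketch follows, assuming the client.Sender interface of this era (Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)); the actual definition may differ.

// Sketch only: the real notifyingSender may differ in detail.
type notifyingSender struct {
	wrapped client.Sender
	waiter  *sync.WaitGroup // signaled after each batch, so tests can wait
}

// Send forwards the batch to the wrapped sender and notifies any waiter.
func (ns *notifyingSender) Send(
	ctx context.Context, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
	br, pErr := ns.wrapped.Send(ctx, ba)
	if ns.waiter != nil {
		ns.waiter.Done()
	}
	return br, pErr
}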
Example 2: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Run a one-off transaction with that single command.
	if log.V(1) {
		log.Infof(tc.ctx, "%s: auto-wrapping in txn and re-executing: ", ba)
	}
	// TODO(bdarnell): need to be able to pass other parts of DBContext
	// through here.
	dbCtx := client.DefaultDBContext()
	dbCtx.UserPriority = ba.UserPriority
	tmpDB := client.NewDBWithContext(tc, dbCtx)
	var br *roachpb.BatchResponse
	err := tmpDB.Txn(context.TODO(), func(txn *client.Txn) error {
		txn.SetDebugName("auto-wrap", 0)
		b := txn.NewBatch()
		b.Header = ba.Header
		for _, arg := range ba.Requests {
			req := arg.GetInner()
			b.AddRawRequest(req)
		}
		err := txn.CommitInBatch(b)
		br = b.RawResponse()
		return err
	})
	if err != nil {
		return nil, roachpb.NewError(err)
	}
	br.Txn = nil // hide the evidence
	return br, nil
}
Example 3: TestClientRunTransaction
// TestClientRunTransaction verifies some simple transaction isolation
// semantics.
func TestClientRunTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions.InitialBackoff = 1 * time.Millisecond
	db := createTestClientForUser(t, s.Stopper(), s.ServingAddr(), security.NodeUser, dbCtx)

	for _, commit := range []bool{true, false} {
		value := []byte("value")
		key := []byte(fmt.Sprintf("%s/key-%t", testUser, commit))

		// Use snapshot isolation so non-transactional read can always push.
		err := db.Txn(func(txn *client.Txn) error {
			if err := txn.SetIsolation(enginepb.SNAPSHOT); err != nil {
				return err
			}
			// Put transactional value.
			if err := txn.Put(key, value); err != nil {
				return err
			}
			// Attempt to read outside of txn.
			if gr, err := db.Get(key); err != nil {
				return err
			} else if gr.Value != nil {
				return errors.Errorf("expected nil value; got %+v", gr.Value)
			}
			// Read within the transaction.
			if gr, err := txn.Get(key); err != nil {
				return err
			} else if gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) {
				return errors.Errorf("expected value %q; got %q", value, gr.ValueBytes())
			}
			if !commit {
				return errors.Errorf("purposefully failing transaction")
			}
			return nil
		})

		if commit != (err == nil) {
			t.Errorf("expected success? %t; got %s", commit, err)
		} else if !commit && !testutils.IsError(err, "purposefully failing transaction") {
			t.Errorf("unexpected failure with !commit: %s", err)
		}

		// Verify the value is now visible on commit == true, and not visible otherwise.
		gr, err := db.Get(key)
		if commit {
			if err != nil || gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) {
				t.Errorf("expected success reading value: %+v, %s", gr.Value, err)
			}
		} else {
			if err != nil || gr.Value != nil {
				t.Errorf("expected success and nil value: %+v, %s", gr.Value, err)
			}
		}
	}
}
Example 4: TestClientPermissions
// TestClientPermissions verifies permission enforcement.
func TestClientPermissions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop()

	// NodeUser certs are required for all KV operations.
	// RootUser has no KV privileges whatsoever.
	nodeClient := createTestClientForUser(t, s.Stopper(), s.ServingAddr(),
		security.NodeUser, client.DefaultDBContext())
	rootClient := createTestClientForUser(t, s.Stopper(), s.ServingAddr(),
		security.RootUser, client.DefaultDBContext())

	testCases := []struct {
		path    string
		client  *client.DB
		allowed bool
	}{
		{"foo", rootClient, false},
		{"foo", nodeClient, true},
		{testUser + "/foo", rootClient, false},
		{testUser + "/foo", nodeClient, true},
		{testUser + "foo", rootClient, false},
		{testUser + "foo", nodeClient, true},
		{testUser, rootClient, false},
		{testUser, nodeClient, true},
		{"unknown/foo", rootClient, false},
		{"unknown/foo", nodeClient, true},
	}

	value := []byte("value")
	const matchErr = "is not allowed"
	for tcNum, tc := range testCases {
		err := tc.client.Put(tc.path, value)
		if (err == nil) != tc.allowed || (!tc.allowed && !testutils.IsError(err, matchErr)) {
			t.Errorf("#%d: expected allowed=%t, got err=%s", tcNum, tc.allowed, err)
		}
		_, err = tc.client.Get(tc.path)
		if (err == nil) != tc.allowed || (!tc.allowed && !testutils.IsError(err, matchErr)) {
			t.Errorf("#%d: expected allowed=%t, got err=%s", tcNum, tc.allowed, err)
		}
	}
}
Example 5: TestRangeSplitsWithWritePressure
// TestRangeSplitsWithWritePressure sets the zone config max bytes for
// a range to 256K and writes data until there are five ranges.
func TestRangeSplitsWithWritePressure(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// Override default zone config.
	cfg := config.DefaultZoneConfig()
	cfg.RangeMaxBytes = 1 << 18 // 256KiB
	defer config.TestingSetDefaultZoneConfig(cfg)()

	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     10 * time.Millisecond,
		Multiplier:     2,
	}
	s, _ := createTestDBWithContext(t, dbCtx)
	// This is purely to silence log spam.
	config.TestingSetupZoneConfigHook(s.Stopper)
	defer s.Stop()

	// Start a test writer that writes about 32K per key, so that not too
	// many writes are needed to force the range to split.
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t)

	// Check that we split 5 times in the allotted time.
	util.SucceedsSoon(t, func() error {
		// Scan the meta2 records.
		rows, err := s.DB.Scan(keys.Meta2Prefix, keys.MetaMax, 0)
		if err != nil {
			return errors.Errorf("failed to scan meta2 keys: %s", err)
		}
		if lr := len(rows); lr < 5 {
			return errors.Errorf("expected >= 5 scans; got %d", lr)
		}
		return nil
	})
	close(done)
	wg.Wait()

	// This write pressure test often causes splits while resolve
	// intents are in flight, causing them to fail with range key
	// mismatch errors. However, LocalSender should retry in these
	// cases. Check here via MVCC scan that there are no dangling write
	// intents. We do this using a SucceedsSoon construct to account
	// for timing of finishing the test writer and a possibly-ongoing
	// asynchronous split.
	util.SucceedsSoon(t, func() error {
		if _, _, err := engine.MVCCScan(context.Background(), s.Eng, keys.LocalMax, roachpb.KeyMax, 0, hlc.MaxTimestamp, true, nil); err != nil {
			return errors.Errorf("failed to verify no dangling intents: %s", err)
		}
		return nil
	})
}
Example 6: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Context, initSender InitSenderFn) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	tracer := tracing.NewTracer()
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(baseCtx, ltc.Clock, ltc.Stopper)
	server := rpc.NewServer(rpcContext) // never started
	ltc.Gossip = gossip.New(
		context.Background(), rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)

	ltc.Stores = storage.NewStores(ltc.Clock)
	ltc.Sender = initSender(nodeDesc, tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
		ltc.Gossip)
	if ltc.DBContext == nil {
		dbCtx := client.DefaultDBContext()
		ltc.DBContext = &dbCtx
	}
	ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	if ltc.RangeRetryOptions != nil {
		ctx.RangeRetryOptions = *ltc.RangeRetryOptions
	}
	ctx.Ctx = tracing.WithTracer(context.Background(), tracer)
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Example 7: checkConcurrency
// checkConcurrency creates a history verifier, starts a new database
// and runs the verifier.
func checkConcurrency(
	name string,
	isolations []enginepb.IsolationType,
	txns []string,
	verify *verifier,
	t *testing.T,
) {
	verifier := newHistoryVerifier(name, txns, verify, t)
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = correctnessTestRetryOptions
	s, _ := createTestDBWithContext(t, dbCtx)
	defer s.Stop()
	setCorrectnessRetryOptions(s.Stores)
	verifier.run(isolations, s.DB, t)
}
Example 8: checkConcurrency
// checkConcurrency creates a history verifier, starts a new database
// and runs the verifier.
func checkConcurrency(
	name string,
	isolations []enginepb.IsolationType,
	txns []string,
	verify *verifier,
	t *testing.T,
) {
	verifier := newHistoryVerifier(name, txns, verify, t)
	dbCtx := client.DefaultDBContext()
	dbCtx.TxnRetryOptions = correctnessTestRetryOptions
	s := &localtestcluster.LocalTestCluster{
		DBContext:         &dbCtx,
		RangeRetryOptions: &correctnessTestRetryOptions,
	}
	s.Start(t, testutils.NewNodeTestBaseContext(), InitSenderForLocalTestCluster)
	defer s.Stop()
	verifier.run(isolations, s.DB, t)
}
Example 9: createTestClient
func createTestClient(t *testing.T, stopper *stop.Stopper, addr string) *client.DB {
	return createTestClientForUser(t, stopper, addr, security.NodeUser, client.DefaultDBContext())
}
Example 10: createTestDB
// createTestDB creates a local test server and starts it. The caller
// is responsible for stopping the test server.
func createTestDB(t testing.TB) (*localtestcluster.LocalTestCluster, *TxnCoordSender) {
	return createTestDBWithContext(t, client.DefaultDBContext())
}
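To round out Example 10, here is a hedged usage sketch of createTestDB inside a test; the test name, key, and value are arbitrary, and, per the comment above, stopping the cluster is the caller's job.

// TestExampleUsage is a hypothetical caller of createTestDB.
func TestExampleUsage(t *testing.T) {
	s, _ := createTestDB(t)
	defer s.Stop() // the caller is responsible for stopping the test server
	if err := s.DB.Put("example-key", "example-value"); err != nil {
		t.Fatal(err)
	}
}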