本文整理匯總了Golang中github.com/cockroachdb/cockroach/client.NewTxn函數的典型用法代碼示例。如果您正苦於以下問題:Golang NewTxn函數的具體用法?Golang NewTxn怎麼用?Golang NewTxn使用的例子?那麼, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了NewTxn函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: TestTxnCoordSenderCleanupOnAborted
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// Lay down an intent at "a" with a low-priority transaction.
	key := roachpb.Key("a")
	lowPriTxn := client.NewTxn(*s.DB)
	lowPriTxn.InternalSetPriority(1)
	if pErr := lowPriTxn.Put(key, []byte("value")); pErr != nil {
		t.Fatal(pErr)
	}

	// A higher-priority write to the same key pushes (and aborts) the
	// first transaction.
	highPriTxn := client.NewTxn(*s.DB)
	highPriTxn.InternalSetPriority(2)
	if pErr := highPriTxn.Put(key, []byte("value2")); pErr != nil {
		t.Fatal(pErr)
	}

	// Committing the aborted txn must surface the abort error, yet the
	// coordinator still cleans up its state.
	pErr := lowPriTxn.Commit()
	if _, ok := pErr.GoError().(*roachpb.TransactionAbortedError); !ok {
		t.Fatalf("expected transaction aborted error; got %s", pErr)
	}
	if pErr := highPriTxn.Commit(); pErr != nil {
		t.Fatal(pErr)
	}
	verifyCleanup(key, s.Sender, s.Eng, t)
}
示例2: TestTxnCoordSenderCleanupOnAborted
// TestTxnCoordSenderCleanupOnAborted verifies that if a txn receives a
// TransactionAbortedError, the coordinator cleans up the transaction.
func TestTxnCoordSenderCleanupOnAborted(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()

	key := roachpb.Key("a")

	// Write an intent at "a" from a low-priority transaction.
	loTxn := client.NewTxn(context.Background(), *s.DB)
	loTxn.InternalSetPriority(1)
	if err := loTxn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}

	// A higher-priority write to the same key pushes loTxn to an aborted
	// state.
	hiTxn := client.NewTxn(context.Background(), *s.DB)
	hiTxn.InternalSetPriority(2)
	if err := hiTxn.Put(key, []byte("value2")); err != nil {
		t.Fatal(err)
	}

	// Ending loTxn must report the abort while still cleaning up after it.
	assertTransactionAbortedError(t, loTxn.CommitOrCleanup())
	if err := hiTxn.CommitOrCleanup(); err != nil {
		t.Fatal(err)
	}
	verifyCleanup(key, sender, s.Eng, t)
}
示例3: TestTxnCoordSenderBeginTransaction
// TestTxnCoordSenderBeginTransaction verifies that a command sent with a
// not-nil Txn with empty ID gets a new transaction initialized.
func TestTxnCoordSenderBeginTransaction(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)

	// Configure the txn before any request is sent.
	txn := client.NewTxn(*s.DB)
	key := roachpb.Key("key")
	txn.InternalSetPriority(10)
	txn.Proto.Isolation = roachpb.SNAPSHOT
	txn.Proto.Name = "test txn"

	// The first Put lazily initializes the transaction proto.
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}

	// All explicitly-set attributes must survive initialization.
	if got := txn.Proto.Name; got != "test txn" {
		t.Errorf("expected txn name to be %q; got %q", "test txn", got)
	}
	if got := txn.Proto.Priority; got != 10 {
		t.Errorf("expected txn priority 10; got %d", got)
	}
	if !bytes.Equal(txn.Proto.Key, key) {
		t.Errorf("expected txn Key to match %q != %q", key, txn.Proto.Key)
	}
	if txn.Proto.Isolation != roachpb.SNAPSHOT {
		t.Errorf("expected txn isolation to be SNAPSHOT; got %s", txn.Proto.Isolation)
	}
}
示例4: TestTxnCoordSenderGCTimeout
// TestTxnCoordSenderGCTimeout verifies that the coordinator cleans up extant
// transactions and intents after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGCTimeout(t *testing.T) {
defer leaktest.AfterTest(t)()
s, sender := createTestDB(t)
defer s.Stop()
// Set heartbeat interval to 1ms for testing.
sender.heartbeatInterval = 1 * time.Millisecond
// An initial write makes the coordinator start tracking this txn.
txn := client.NewTxn(context.Background(), *s.DB)
key := roachpb.Key("a")
if err := txn.Put(key, []byte("value")); err != nil {
t.Fatal(err)
}
// Now, advance clock past the default client timeout.
// Locking the TxnCoordSender to prevent a data race.
sender.Lock()
s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
sender.Unlock()
// Capture the txn ID up front; the GC may remove the map entry at any time.
txnID := *txn.Proto.ID
util.SucceedsSoon(t, func() error {
// Locking the TxnCoordSender to prevent a data race.
sender.Lock()
_, ok := sender.txns[txnID]
sender.Unlock()
if ok {
return util.Errorf("expected garbage collection")
}
return nil
})
// The abandoned txn's intent at key must have been resolved as well.
verifyCleanup(key, sender, s.Eng, t)
}
示例5: TestTxnCoordSenderHeartbeat
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
defer leaktest.AfterTest(t)()
s := createTestDB(t)
defer s.Stop()
defer teardownHeartbeats(s.Sender)
// Set heartbeat interval to 1ms for testing.
s.Sender.heartbeatInterval = 1 * time.Millisecond
// The first write makes the coordinator begin heartbeating this txn.
initialTxn := client.NewTxn(*s.DB)
if err := initialTxn.Put(roachpb.Key("a"), []byte("value")); err != nil {
t.Fatal(err)
}
// Verify 3 heartbeats.
// heartbeatTS is captured by the retry closure below; each outer iteration
// succeeds only once LastHeartbeat has advanced past the last observed value.
var heartbeatTS roachpb.Timestamp
for i := 0; i < 3; i++ {
util.SucceedsSoon(t, func() error {
ok, txn, pErr := getTxn(s.Sender, &initialTxn.Proto)
if !ok || pErr != nil {
t.Fatalf("got txn: %t: %s", ok, pErr)
}
// Advance clock by 1ns.
// Locking the TxnCoordSender to prevent a data race.
s.Sender.Lock()
s.Manual.Increment(1)
s.Sender.Unlock()
if heartbeatTS.Less(*txn.LastHeartbeat) {
heartbeatTS = *txn.LastHeartbeat
return nil
}
return util.Errorf("expected heartbeat")
})
}
}
示例6: TestTxnCoordSenderGC
// TestTxnCoordSenderGC verifies that the coordinator cleans up extant
// transactions after the lastUpdateNanos exceeds the timeout.
func TestTxnCoordSenderGC(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// A short heartbeat interval keeps the test fast.
	s.Sender.heartbeatInterval = 1 * time.Millisecond

	// Begin tracking the txn by writing an intent.
	txn := client.NewTxn(*s.DB)
	if pErr := txn.Put(roachpb.Key("a"), []byte("value")); pErr != nil {
		t.Fatal(pErr)
	}

	// Advance the clock past the default client timeout; the coordinator
	// should then drop the transaction from its map. Lock the TxnCoordSender
	// to avoid a data race on the manual clock.
	s.Sender.Lock()
	s.Manual.Set(defaultClientTimeout.Nanoseconds() + 1)
	s.Sender.Unlock()

	gone := func() bool {
		// Locking the TxnCoordSender to prevent a data race.
		s.Sender.Lock()
		defer s.Sender.Unlock()
		_, ok := s.Sender.txns[string(txn.Proto.ID)]
		return !ok
	}
	if err := util.IsTrueWithin(gone, 50*time.Millisecond); err != nil {
		t.Error("expected garbage collection")
	}
}
示例7: TestTxnRestartCount
// TestTxnRestartCount checks that a txn whose write is pushed by a
// conflicting read restarts, and that the restart is reflected in metrics.
func TestTxnRestartCount(t *testing.T) {
	defer leaktest.AfterTest(t)()
	_, sender, cleanupFn := setupMetricsTest(t)
	defer cleanupFn()

	key := []byte("key-restart")
	db := client.NewDB(sender)

	// A GET inside the transaction forces a timestamp to be chosen for it.
	txn := client.NewTxn(*db)
	if _, err := txn.Get(key); err != nil {
		t.Fatal(err)
	}

	// Outside of the transaction, read the same key as was read within the
	// transaction. Future attempts to write it must advance the timestamp.
	if _, err := db.Get(key); err != nil {
		t.Fatal(err)
	}

	// The put's pushed candidate timestamp causes an immediate restart.
	if err := txn.Put(key, []byte("value")); err == nil {
		t.Fatalf("unexpected success")
	}
	teardownHeartbeats(sender)
	checkTxnMetrics(t, sender, "restart txn", 0, 1, 0, 1)
}
示例8: ExecuteStatements
// ExecuteStatements executes the given statement(s) and returns a response.
// On error, the returned integer is an HTTP error code.
//
// A pending transaction carried in session.Txn is resumed around the
// execution and its (possibly updated) state is written back into the
// session afterwards, so the client can carry it across requests.
func (e *Executor) ExecuteStatements(
user string, session *Session, stmts string,
params []parser.Datum) StatementResults {
// Planners are pooled; releasePlanner returns this one on exit.
planMaker := plannerPool.Get().(*planner)
defer releasePlanner(planMaker)
cfg, cache := e.getSystemConfig()
// Reinitialize the pooled planner wholesale for this request.
*planMaker = planner{
user: user,
evalCtx: parser.EvalContext{
NodeID: e.nodeID,
ReCache: e.reCache,
GetLocation: session.getLocation,
},
leaseMgr: e.ctx.LeaseManager,
systemConfig: cfg,
databaseCache: cache,
session: session,
}
curTxnState := txnState{
txn: nil,
aborted: session.Txn.TxnAborted,
}
if txnProto := session.Txn.Txn; txnProto != nil {
// A pending transaction is already present, resume it.
curTxnState.txn = client.NewTxn(*e.ctx.DB)
curTxnState.txn.Proto = *txnProto
curTxnState.txn.UserPriority = session.Txn.UserPriority
if session.Txn.MutatesSystemConfig {
curTxnState.txn.SetSystemConfigTrigger()
}
planMaker.setTxn(curTxnState.txn)
}
// Clear the session's txn; it is repopulated from curTxnState below.
session.Txn = Session_Transaction{}
// Send the Request for SQL execution and set the application-level error
// for each result in the reply.
planMaker.params = parameters(params)
res := e.execRequest(&curTxnState, stmts, planMaker)
// Send back the session state even if there were application-level errors.
// Add transaction to session state.
session.Txn.TxnAborted = curTxnState.aborted
if curTxnState.txn != nil {
// TODO(pmattis): Need to associate the leases used by a transaction with
// the session state.
planMaker.releaseLeases()
session.Txn.Txn = &curTxnState.txn.Proto
session.Txn.UserPriority = curTxnState.txn.UserPriority
session.Txn.MutatesSystemConfig = curTxnState.txn.SystemConfigTrigger()
} else {
// No transaction survived execution; leave the session txn-free.
session.Txn.Txn = nil
session.Txn.MutatesSystemConfig = false
}
return res
}
示例9: TestTxnCoordSenderAddIntentOnError
// TestTxnCoordSenderAddIntentOnError verifies that intents are tracked if
// the transaction is, even on error.
func TestTxnCoordSenderAddIntentOnError(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	// Create a transaction with intent at "x".
	key := roachpb.Key("x")
	txn := client.NewTxn(*s.DB)
	// Write so that the coordinator begins tracking this txn.
	if err := txn.Put("x", "y"); err != nil {
		t.Fatal(err)
	}
	// The CPut is expected to fail with a ConditionFailedError, but the
	// coordinator should track its intent span regardless.
	err, ok := txn.CPut(key, []byte("x"), []byte("born to fail")).GoError().(*roachpb.ConditionFailedError)
	if !ok {
		t.Fatal(err)
	}
	s.Sender.Lock()
	intentSpans := s.Sender.txns[string(txn.Proto.ID)].intentSpans()
	expSpans := []roachpb.Span{{Key: key, EndKey: []byte("")}}
	// BUG FIX: the original negated DeepEqual here, which made the test fail
	// exactly when the stored intents matched the expectation.
	equal := reflect.DeepEqual(intentSpans, expSpans)
	s.Sender.Unlock()
	if pErr := txn.Rollback(); pErr != nil {
		t.Fatal(pErr)
	}
	if !equal {
		t.Fatalf("expected stored intents %v, got %v", expSpans, intentSpans)
	}
}
示例10: TestTxnInitialTimestamp
// TestTxnInitialTimestamp verifies that the timestamp requested
// before the Txn is created is honored.
func TestTxnInitialTimestamp(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(sender)

	// Pin the transaction to a specific timestamp before its first request.
	refTimestamp := roachpb.Timestamp{WallTime: 42, Logical: 69}
	txn := client.NewTxn(context.Background(), *s.DB)
	txn.Proto.OrigTimestamp = refTimestamp
	txn.InternalSetPriority(10)
	txn.Proto.Isolation = roachpb.SNAPSHOT
	txn.Proto.Name = "test txn"

	// The first Put initializes the transaction.
	key := roachpb.Key("key")
	if err := txn.Put(key, []byte("value")); err != nil {
		t.Fatal(err)
	}

	// Both the original and the current timestamp must match the request.
	if got := txn.Proto.OrigTimestamp; got != refTimestamp {
		t.Errorf("expected txn orig ts to be %s; got %s", refTimestamp, got)
	}
	if got := txn.Proto.Timestamp; got != refTimestamp {
		t.Errorf("expected txn ts to be %s; got %s", refTimestamp, got)
	}
}
示例11: Prepare
// Prepare returns the result types of the given statement. Args may be a
// partially populated val args map. Prepare will populate the missing val
// args. The column result types are returned (or nil if there are no results).
func (e *Executor) Prepare(query string, session *Session, args parser.MapArgs) (
[]ResultColumn, *roachpb.Error) {
stmt, err := parser.ParseOne(query, parser.Syntax(session.Syntax))
if err != nil {
return nil, roachpb.NewError(err)
}
// Put the planner into prepare-only mode; no statement is executed.
session.planner.resetForBatch(e)
session.planner.evalCtx.Args = args
session.planner.evalCtx.PrepareOnly = true
// TODO(andrei): does the prepare phase really need a Txn?
txn := client.NewTxn(*e.ctx.DB)
txn.Proto.Isolation = session.DefaultIsolationLevel
session.planner.setTxn(txn)
// Detach the txn from the planner again before returning on any path.
defer session.planner.setTxn(nil)
plan, pErr := session.planner.prepare(stmt)
if pErr != nil {
return nil, pErr
}
// A nil plan means the statement produces no result columns.
if plan == nil {
return nil, nil
}
cols := plan.Columns()
for _, c := range cols {
// Reject column types that cannot be sent back to the client.
if err := checkResultDatum(c.Typ); err != nil {
return nil, roachpb.NewError(err)
}
}
return cols, nil
}
示例12: TestTxnCoordIdempotentCleanup
// TestTxnCoordIdempotentCleanup verifies that cleanupTxnLocked is idempotent.
func TestTxnCoordIdempotentCleanup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(sender)

	// Start tracking the txn by running a batched write.
	txn := client.NewTxn(context.Background(), *s.DB)
	b := txn.NewBatch()
	b.Put(roachpb.Key("a"), []byte("value"))
	if err := txn.Run(b); err != nil {
		t.Fatal(err)
	}

	// Clean up twice successively; the second call must be a no-op.
	sender.Lock()
	sender.cleanupTxnLocked(context.Background(), txn.Proto)
	sender.cleanupTxnLocked(context.Background(), txn.Proto)
	sender.Unlock()

	// For good measure, try to commit (which cleans up once more if it
	// succeeds, which it may not if the previous cleanup has already
	// terminated the heartbeat goroutine)
	b = txn.NewBatch()
	b.AddRawRequest(&roachpb.EndTransactionRequest{})
	if err := txn.Run(b); err != nil && !testutils.IsError(err, errNoState.Error()) {
		t.Fatal(err)
	}
}
示例13: TestTxnCoordSenderEndTxn
// TestTxnCoordSenderEndTxn verifies that ending a transaction
// sends resolve write intent requests and removes the transaction
// from the txns map.
func TestTxnCoordSenderEndTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()

	// 4 cases: no deadline, past deadline, equal deadline, future deadline.
	for i := 0; i < 4; i++ {
		key := roachpb.Key("key: " + strconv.Itoa(i))
		txn := client.NewTxn(*s.DB)
		// Initialize the transaction.
		if err := txn.Put(key, []byte("value")); err != nil {
			t.Fatal(err)
		}

		var deadline *roachpb.Timestamp
		switch i {
		case 0:
			// No deadline.
		case 1:
			// Past deadline.
			ts := txn.Proto.Timestamp.Prev()
			deadline = &ts
		case 2:
			// Equal deadline.
			deadline = &txn.Proto.Timestamp
		case 3:
			// Future deadline.
			ts := txn.Proto.Timestamp.Next()
			deadline = &ts
		}
		err := txn.CommitBy(deadline)
		switch i {
		case 1:
			// Past deadline: the commit must be refused with an abort.
			// BUG FIX: the original expected success here and an abort for
			// the future deadline (case 3), which inverts the deadline
			// semantics — a deadline already in the past can never be met.
			if _, ok := err.(*roachpb.TransactionAbortedError); !ok {
				t.Errorf("expected TransactionAbortedError but got %T: %s", err, err)
			}
		default:
			// No deadline, equal deadline and future deadline all commit.
			if err != nil {
				t.Error(err)
			}
		}
		verifyCleanup(key, s.Sender, s.Eng, t)
	}
}
示例14: reset
// reset creates a new Txn and initializes it using the session defaults.
func (ts *txnState) reset(ctx context.Context, e *Executor, s *Session) {
// Zero all fields first so no state leaks in from a prior transaction.
*ts = txnState{}
ts.txn = client.NewTxn(ctx, *e.ctx.DB)
// The fresh transaction inherits the session's default isolation level.
ts.txn.Proto.Isolation = s.DefaultIsolationLevel
ts.tr = s.Trace
// Discard the old schemaChangers, if any.
ts.schemaChangers = schemaChangerCollection{}
}
示例15: ExecuteStatements
// ExecuteStatements executes the given statement(s) and returns a response.
// On error, the returned integer is an HTTP error code.
//
// Any pending transaction in the session is resumed for execution and its
// updated state is marshaled back into the reply's session bytes.
func (e *Executor) ExecuteStatements(user string, session Session, stmts string, params []parser.Datum) (Response, int, error) {
// Planners are pooled; this one is returned to the pool on exit.
planMaker := plannerPool.Get().(*planner)
defer plannerPool.Put(planMaker)
// Reinitialize the pooled planner wholesale for this request.
*planMaker = planner{
user: user,
evalCtx: parser.EvalContext{
NodeID: e.nodeID,
ReCache: e.reCache,
// Copy existing GetLocation closure. See plannerPool.New() for the
// initial setting.
GetLocation: planMaker.evalCtx.GetLocation,
},
leaseMgr: e.leaseMgr,
systemConfig: e.getSystemConfig(),
session: session,
}
// Resume a pending transaction if present.
if planMaker.session.Txn != nil {
txn := client.NewTxn(e.db)
txn.Proto = planMaker.session.Txn.Txn
txn.UserPriority = planMaker.session.Txn.UserPriority
if planMaker.session.MutatesSystemConfig {
txn.SetSystemConfigTrigger()
}
planMaker.setTxn(txn, planMaker.session.Txn.Timestamp.GoTime())
}
// Send the Request for SQL execution and set the application-level error
// for each result in the reply.
planMaker.params = parameters(params)
reply := e.execStmts(stmts, planMaker)
// Send back the session state even if there were application-level errors.
// Add transaction to session state.
if planMaker.txn != nil {
// TODO(pmattis): Need to record the leases used by a transaction within
// the transaction state and restore it when the transaction is restored.
planMaker.releaseLeases(e.db)
planMaker.session.Txn = &Session_Transaction{
Txn: planMaker.txn.Proto,
Timestamp: driver.Timestamp(planMaker.evalCtx.TxnTimestamp.Time),
UserPriority: planMaker.txn.UserPriority,
}
planMaker.session.MutatesSystemConfig = planMaker.txn.SystemConfigTrigger()
} else {
// No transaction survived execution; clear the session's txn state.
planMaker.session.Txn = nil
planMaker.session.MutatesSystemConfig = false
}
// Serialize the session so the client can round-trip it on the next call.
bytes, err := proto.Marshal(&planMaker.session)
if err != nil {
return Response{}, http.StatusInternalServerError, err
}
reply.Session = bytes
return reply, 0, nil
}