This article collects typical usage examples of the Golang function NewDBWithPriority from the github.com/cockroachdb/cockroach/client package. If you are wondering what NewDBWithPriority does, how to use it, or where to find real-world examples of it, the curated code samples below may help.
Six code examples of the NewDBWithPriority function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
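Before the collected examples, here is a minimal sketch of the basic call pattern: NewDBWithPriority wraps an existing client.Sender in a *client.DB whose requests are sent at a fixed user priority. The helper name runAtPriority and the key/value below are hypothetical placeholders rather than CockroachDB code; the closure signature mirrors Example 1.

package example

import (
    "github.com/cockroachdb/cockroach/client"
    "github.com/cockroachdb/cockroach/roachpb"
)

// runAtPriority is a hypothetical helper: it wraps sender in a DB that
// issues every request at the given user priority, then performs one
// batched write inside a transaction.
func runAtPriority(sender client.Sender, priority roachpb.UserPriority) *roachpb.Error {
    db := client.NewDBWithPriority(sender, priority)
    return db.Txn(func(txn *client.Txn) *roachpb.Error {
        b := txn.NewBatch()
        b.Put("hypothetical-key", "hypothetical-value") // placeholder key and value
        _, pErr := txn.CommitInBatchWithResponse(b)
        return pErr
    })
}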
Example 1: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
    // Run a one-off transaction with that single command.
    if log.V(1) {
        log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
    }
    tmpDB := client.NewDBWithPriority(tc, ba.UserPriority)
    var br *roachpb.BatchResponse
    pErr := tmpDB.Txn(func(txn *client.Txn) *roachpb.Error {
        txn.SetDebugName("auto-wrap", 0)
        b := txn.NewBatch()
        b.MaxScanResults = ba.MaxScanResults
        for _, arg := range ba.Requests {
            req := arg.GetInner()
            b.InternalAddRequest(req)
        }
        var pErr *roachpb.Error
        br, pErr = txn.CommitInBatchWithResponse(b)
        return pErr
    })
    if pErr != nil {
        return nil, pErr
    }
    br.Txn = nil // hide the evidence
}
Example 2: createTestNotifyClient
// createTestNotifyClient creates a new client which connects using an HTTP
// sender to the server at addr. It contains a waitgroup to allow waiting.
func createTestNotifyClient(stopper *stop.Stopper, addr string, priority roachpb.UserPriority) (*client.DB, *notifyingSender) {
    db, err := client.Open(stopper, fmt.Sprintf("rpcs://%s@%s?certs=%s",
        security.NodeUser,
        addr,
        security.EmbeddedCertsDir))
    if err != nil {
        log.Fatal(err)
    }
    sender := &notifyingSender{wrapped: db.GetSender()}
    return client.NewDBWithPriority(sender, priority), sender
}
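The notifyingSender type used above is defined elsewhere in the test file. As a rough idea of the wrapper pattern, a reconstruction might look like the sketch below. This is an assumption based on the usage above, not the actual test code: it presumes the era's client.Sender interface is Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error), and that the "waitgroup to allow waiting" is signaled after each forwarded request.

package example

import (
    "sync"

    "golang.org/x/net/context" // assumed; the project used this context package at the time

    "github.com/cockroachdb/cockroach/client"
    "github.com/cockroachdb/cockroach/roachpb"
)

// notifyingSender (hypothetical reconstruction) forwards every batch to the
// wrapped sender and, if a WaitGroup has been registered via reset, marks it
// done so tests can wait for a request to have gone through.
type notifyingSender struct {
    notify  *sync.WaitGroup
    wrapped client.Sender
}

func (ss *notifyingSender) reset(wg *sync.WaitGroup) {
    ss.notify = wg
}

func (ss *notifyingSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
    br, pErr := ss.wrapped.Send(ctx, ba)
    if ss.notify != nil {
        ss.notify.Done()
        ss.notify = nil
    }
    return br, pErr
}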
Example 3: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, error) {
    // Run a one-off transaction with that single command.
    if log.V(1) {
        log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
    }
    tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
    br := &proto.BatchResponse{}
    if err := tmpDB.Txn(func(txn *client.Txn) error {
        txn.SetDebugName("auto-wrap", 0)
        b := &client.Batch{}
        for _, arg := range ba.Requests {
            req := arg.GetValue().(proto.Request)
            call := proto.Call{Args: req, Reply: req.CreateReply()}
            b.InternalAddCall(call)
            br.Add(call.Reply)
        }
        return txn.CommitInBatch(b)
    }); err != nil {
        return nil, err
    }
    return br, nil
}
Example 4: resendWithTxn
// TODO(tschottdorf): this method is somewhat awkward but unless we want to
// give this error back to the client, our options are limited. We'll have to
// run the whole thing for them, or any restart will still end up at the client
// which will not be prepared to be handed a Txn.
func (tc *TxnCoordSender) resendWithTxn(ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
    // Run a one-off transaction with that single command.
    if log.V(1) {
        log.Infof("%s: auto-wrapping in txn and re-executing: ", ba)
    }
    tmpDB := client.NewDBWithPriority(tc, ba.GetUserPriority())
    var br *proto.BatchResponse
    err := tmpDB.Txn(func(txn *client.Txn) error {
        txn.SetDebugName("auto-wrap", 0)
        b := &client.Batch{}
        for _, arg := range ba.Requests {
            req := arg.GetInner()
            b.InternalAddRequest(req)
        }
        var err error
        br, err = txn.CommitInBatchWithResponse(b)
        return err
    })
    if err != nil {
        return nil, proto.NewError(err)
    }
    br.Txn = nil // hide the evidence
    return br, nil
}
Example 5: createTestNotifyClient
// createTestNotifyClient creates a new client which connects using an HTTP
// sender to the server at addr. It contains a waitgroup to allow waiting.
func createTestNotifyClient(t *testing.T, stopper *stop.Stopper, addr string, priority roachpb.UserPriority) (*client.DB, *notifyingSender) {
    db := createTestClient(t, stopper, addr)
    sender := &notifyingSender{wrapped: db.GetSender()}
    return client.NewDBWithPriority(sender, priority), sender
}
Example 6: sendOne
//......... part of the code is elided here .........
        // If this transactional command leaves transactional intents, add the key
        // or key range to the intents map. If the transaction metadata doesn't yet
        // exist, create it.
        if call.Reply.Header().GoError() == nil {
            if proto.IsTransactionWrite(call.Args) {
                if txnMeta == nil {
                    txn.Writing = true
                    trace.Event("coordinator spawns")
                    txnMeta = &txnMetadata{
                        txn:              *txn,
                        keys:             cache.NewIntervalCache(cache.Config{Policy: cache.CacheNone}),
                        firstUpdateNanos: tc.clock.PhysicalNow(),
                        lastUpdateNanos:  tc.clock.PhysicalNow(),
                        timeoutDuration:  tc.clientTimeout,
                        txnEnd:           make(chan struct{}),
                    }
                    tc.txns[id] = txnMeta
                    if !tc.stopper.RunAsyncTask(func() {
                        tc.heartbeatLoop(id)
                    }) {
                        // The system is already draining and we can't start the
                        // heartbeat. We refuse new transactions for now because
                        // they're likely not going to have all intents committed.
                        // In principle, we can relax this as needed though.
                        call.Reply.Header().SetGoError(&proto.NodeUnavailableError{})
                        tc.Unlock()
                        tc.unregisterTxn(id)
                        return
                    }
                }
                txnMeta.addKeyRange(header.Key, header.EndKey)
            }
            // Update our record of this transaction.
            if txnMeta != nil {
                txnMeta.txn = *txn
                txnMeta.setLastUpdate(tc.clock.PhysicalNow())
            }
        }
        tc.Unlock()
    }
    // Cleanup intents and transaction map if end of transaction.
    switch t := call.Reply.Header().GoError().(type) {
    case *proto.TransactionStatusError:
        // Likely already committed or more obscure errors such as epoch or
        // timestamp regressions; consider it dead.
        tc.cleanupTxn(trace, t.Txn)
    case *proto.TransactionAbortedError:
        // If already aborted, cleanup the txn on this TxnCoordSender.
        tc.cleanupTxn(trace, t.Txn)
    case *proto.OpRequiresTxnError:
        // Run a one-off transaction with that single command.
        if log.V(1) {
            log.Infof("%s: auto-wrapping in txn and re-executing", call.Method())
        }
        // TODO(tschottdorf): this part is awkward. Consider resending here
        // without starting a new call, which is hard to trace. Plus, the
        // below depends on default configuration.
        tmpDB := client.NewDBWithPriority(tc, call.Args.Header().GetUserPriority())
        call.Reply.Reset()
        if err := tmpDB.Txn(func(txn *client.Txn) error {
            txn.SetDebugName("auto-wrap", 0)
            b := &client.Batch{}
            b.InternalAddCall(call)
            return txn.CommitInBatch(b)
        }); err != nil {
            log.Warning(err)
        }
    case nil:
        if txn := call.Reply.Header().Txn; txn != nil {
            if _, ok := call.Args.(*proto.EndTransactionRequest); ok {
                // If the --linearizable flag is set, we want to make sure that
                // all the clocks in the system are past the commit timestamp
                // of the transaction. This is guaranteed if either
                // - the commit timestamp is MaxOffset behind startNS
                // - MaxOffset ns were spent in this function
                // when returning to the client. Below we choose the option
                // that involves less waiting, which is likely the first one
                // unless a transaction commits with an odd timestamp.
                if tsNS := txn.Timestamp.WallTime; startNS > tsNS {
                    startNS = tsNS
                }
                sleepNS := tc.clock.MaxOffset() -
                    time.Duration(tc.clock.PhysicalNow()-startNS)
                if tc.linearizable && sleepNS > 0 {
                    defer func() {
                        if log.V(1) {
                            log.Infof("%v: waiting %s on EndTransaction for linearizability", txn.Short(), util.TruncateDuration(sleepNS, time.Millisecond))
                        }
                        time.Sleep(sleepNS)
                    }()
                }
                if txn.Status != proto.PENDING {
                    tc.cleanupTxn(trace, *txn)
                }
            }
        }
    }
}