This article collects typical usage examples of the Start function from the Golang package github.com/cockroachdb/cockroach/util/retry. If you have been wondering what Golang's Start function does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help.
Fifteen code examples of the Start function are shown here, sorted by popularity by default.
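Before the individual examples, it may help to see the basic pattern they all share: build a retry.Options value describing an exponential backoff, hand it to retry.Start, and loop on Next(), which sleeps for the growing backoff interval between attempts. Below is a minimal, self-contained sketch assuming only the API surface visible in the examples on this page; doSomething is a hypothetical placeholder for the fallible operation, and optional fields such as Closer or Stopper (used in some examples to stop retrying on server shutdown) are omitted.

package main

import (
	"errors"
	"log"
	"time"

	"github.com/cockroachdb/cockroach/util/retry"
)

// doSomething is a hypothetical placeholder for a fallible operation.
func doSomething() error { return errors.New("not ready yet") }

func main() {
	opts := retry.Options{
		InitialBackoff: 10 * time.Millisecond, // pause before the second attempt
		MaxBackoff:     1 * time.Second,       // upper bound on the backoff
		Multiplier:     2,                     // each backoff doubles the previous one
	}
	// With no Closer or Stopper set, Next() always returns true, sleeping
	// for the current backoff between attempts, so this loop runs until
	// the operation succeeds.
	for r := retry.Start(opts); r.Next(); {
		if err := doSomething(); err != nil {
			log.Printf("attempt failed: %s; backing off and retrying", err)
			continue
		}
		break // success: leave the retry loop
	}
}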
Example 1: WaitForFullReplication
// WaitForFullReplication waits until all stores in the cluster
// have no ranges with replication pending.
func (tc *TestCluster) WaitForFullReplication() error {
	opts := retry.Options{
		InitialBackoff: time.Millisecond * 10,
		MaxBackoff:     time.Millisecond * 100,
		Multiplier:     2,
	}
	notReplicated := true
	for r := retry.Start(opts); r.Next() && notReplicated; {
		notReplicated = false
		for _, s := range tc.Servers {
			err := s.Stores().VisitStores(func(s *storage.Store) error {
				if err := s.ComputeMetrics(); err != nil {
					return err
				}
				if s.Registry().GetGauge("ranges.replication-pending").Value() > 0 {
					notReplicated = true
				}
				return nil
			})
			if err != nil {
				return err
			}
			if notReplicated {
				break
			}
		}
	}
	return nil
}
Example 2: request
// request returns the result of performing an HTTP GET request.
func request(url string, httpClient *http.Client) ([]byte, bool) {
	for r := retry.Start(retryOptions); r.Next(); {
		req, err := http.NewRequest("GET", url, nil)
		if err != nil {
			log.Fatal(err)
			return nil, false
		}
		req.Header.Set(util.AcceptHeader, util.JSONContentType)
		resp, err := httpClient.Do(req)
		if err != nil {
			log.Infof("could not GET %s - %s", url, err)
			continue
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Infof("could not read body for %s - %s", url, err)
			continue
		}
		if resp.StatusCode != http.StatusOK {
			log.Infof("could not GET %s - statuscode: %d - body: %s", url, resp.StatusCode, body)
			continue
		}
		returnedContentType := resp.Header.Get(util.ContentTypeHeader)
		if returnedContentType != util.JSONContentType {
			log.Infof("unexpected content type: %v", returnedContentType)
			continue
		}
		log.Infof("OK response from %s", url)
		return body, true
	}
	log.Warningf("There was an error retrieving %s", url)
	return nil, false
}
Example 3: GetSnapshot
// GetSnapshot wraps Snapshot() but does not require the replica lock
// to be held and it will block instead of returning
// ErrSnapshotTemporarilyUnavailable.
func (r *Replica) GetSnapshot() (raftpb.Snapshot, error) {
	retryOptions := retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     50 * time.Millisecond,
		Multiplier:     2,
	}
	for retry := retry.Start(retryOptions); retry.Next(); {
		r.mu.Lock()
		snap, err := r.Snapshot()
		snapshotChan := r.mu.snapshotChan
		r.mu.Unlock()
		if err == raft.ErrSnapshotTemporarilyUnavailable {
			if snapshotChan == nil {
				// The call to Snapshot() didn't start an async process due to
				// rate limiting. Try again later.
				continue
			}
			var ok bool
			snap, ok = <-snapshotChan
			if ok {
				return snap, nil
			}
			// Each snapshot worker's output can only be consumed once.
			// We could be racing with raft itself, so if we get a closed
			// channel loop back and try again.
		} else {
			return snap, err
		}
	}
	panic("unreachable") // due to infinite retries
}
Example 4: exec
func (txn *Txn) exec(retryable func(txn *Txn) error) error {
	// Run retryable in a retry loop until we encounter a success or
	// error condition this loop isn't capable of handling.
	var err error
	for r := retry.Start(txn.db.txnRetryOptions); r.Next(); {
		err = retryable(txn)
		if err == nil && txn.Proto.Status == roachpb.PENDING {
			// retryable succeeded, but didn't commit.
			err = txn.commit(nil)
		}
		if restartErr, ok := err.(roachpb.TransactionRestartError); ok {
			if log.V(2) {
				log.Warning(err)
			}
			switch restartErr.CanRestartTransaction() {
			case roachpb.TransactionRestart_IMMEDIATE:
				r.Reset()
				continue
			case roachpb.TransactionRestart_BACKOFF:
				continue
			}
			// By default, fall through and break.
		}
		break
	}
	txn.Cleanup(err)
	return err
}
Example 5: recordJoinEvent
// recordJoinEvent begins an asynchronous task which attempts to log a "node
// join" or "node restart" event. This query will retry until it succeeds or the
// server stops.
func (n *Node) recordJoinEvent() {
	if !n.ctx.LogRangeEvents {
		return
	}
	logEventType := sql.EventLogNodeRestart
	if n.initialBoot {
		logEventType = sql.EventLogNodeJoin
	}
	n.stopper.RunWorker(func() {
		for r := retry.Start(retry.Options{Closer: n.stopper.ShouldStop()}); r.Next(); {
			if err := n.ctx.DB.Txn(func(txn *client.Txn) error {
				return n.eventLogger.InsertEventRecord(txn,
					logEventType,
					int32(n.Descriptor.NodeID),
					int32(n.Descriptor.NodeID),
					struct {
						Descriptor roachpb.NodeDescriptor
						ClusterID  uuid.UUID
						StartedAt  int64
					}{n.Descriptor, n.ClusterID, n.startedAt},
				)
			}); err != nil {
				log.Warningc(n.context(context.TODO()), "unable to log %s event for node %d: %s", logEventType, n.Descriptor.NodeID, err)
			} else {
				return
			}
		}
	})
}
Example 6: recordJoinEvent
// recordJoinEvent begins an asynchronous task which attempts to log a "node
// join" or "node restart" event. This query will retry until it succeeds or the
// server stops.
func (n *Node) recordJoinEvent() {
	if !n.ctx.LogRangeEvents {
		return
	}
	logEventType := sql.EventLogNodeRestart
	if n.initialBoot {
		logEventType = sql.EventLogNodeJoin
	}
	n.stopper.RunWorker(func() {
		retryOpts := base.DefaultRetryOptions()
		retryOpts.Closer = n.stopper.ShouldStop()
		for r := retry.Start(retryOpts); r.Next(); {
			if err := n.ctx.DB.Txn(n.Ctx(), func(txn *client.Txn) error {
				return n.eventLogger.InsertEventRecord(txn,
					logEventType,
					int32(n.Descriptor.NodeID),
					int32(n.Descriptor.NodeID),
					struct {
						Descriptor roachpb.NodeDescriptor
						ClusterID  uuid.UUID
						StartedAt  int64
					}{n.Descriptor, n.ClusterID, n.startedAt},
				)
			}); err != nil {
				log.Warningf(n.Ctx(), "%s: unable to log %s event: %s", n, logEventType, err)
			} else {
				return
			}
		}
	})
}
Example 7: maybeWarnAboutInit
// maybeWarnAboutInit looks for signs indicating a cluster which
// hasn't been initialized and warns. There's no absolutely sure way
// to determine whether the current node is simply waiting to be
// bootstrapped to an existing cluster vs. the operator having failed
// to initialize the cluster via the "cockroach init" command, so
// we can only warn.
//
// This method checks whether all gossip bootstrap hosts are
// connected, and whether the node itself is a bootstrap host, but
// there is still no sentinel gossip.
func (g *Gossip) maybeWarnAboutInit(stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		// Wait 5s before first check.
		select {
		case <-stopper.ShouldStop():
			return
		case <-time.After(5 * time.Second):
		}
		retryOptions := retry.Options{
			InitialBackoff: 5 * time.Second,  // first backoff at 5s
			MaxBackoff:     60 * time.Second, // max backoff is 60s
			Multiplier:     2,                // doubles
			Stopper:        stopper,          // stop no matter what on stopper
		}
		// This will never error because of infinite retries.
		for r := retry.Start(retryOptions); r.Next(); {
			g.mu.Lock()
			hasSentinel := g.is.getInfo(KeySentinel) != nil
			triedAll := g.triedAll
			g.mu.Unlock()
			// If we have the sentinel, exit the retry loop.
			if hasSentinel {
				break
			}
			// Otherwise, if all bootstrap hosts are connected, warn.
			if triedAll {
				log.Warningf("connected to gossip but missing sentinel. Has the cluster been initialized? " +
					"Use \"cockroach init\" to initialize.")
			}
		}
	})
}
Example 8: waitForOneVersion
// waitForOneVersion returns once there are no unexpired leases on the
// previous version of the table descriptor. It returns the current version.
// After returning there can only be versions of the descriptor >= to the
// returned version. Lease acquisition (see acquire()) maintains the
// invariant that no new leases for desc.Version-1 will be granted once
// desc.Version exists.
func (s LeaseStore) waitForOneVersion(tableID sqlbase.ID, retryOpts retry.Options) (
	sqlbase.DescriptorVersion, error,
) {
	desc := &sqlbase.Descriptor{}
	descKey := sqlbase.MakeDescMetadataKey(tableID)
	var tableDesc *sqlbase.TableDescriptor
	for r := retry.Start(retryOpts); r.Next(); {
		// Get the current version of the table descriptor non-transactionally.
		//
		// TODO(pmattis): Do an inconsistent read here?
		if err := s.db.GetProto(descKey, desc); err != nil {
			return 0, err
		}
		tableDesc = desc.GetTable()
		if tableDesc == nil {
			return 0, errors.Errorf("ID %d is not a table", tableID)
		}
		// Check to see if there are any leases that still exist on the previous
		// version of the descriptor.
		now := s.clock.Now()
		count, err := s.countLeases(tableDesc.ID, tableDesc.Version-1, now.GoTime())
		if err != nil {
			return 0, err
		}
		if count == 0 {
			break
		}
		log.Infof(context.TODO(), "publish (count leases): descID=%d name=%s version=%d count=%d",
			tableDesc.ID, tableDesc.Name, tableDesc.Version-1, count)
	}
	return tableDesc.Version, nil
}
Example 9: Send
// Send sends call to Cockroach via an RPC request. Errors which are retryable
// are retried with backoff in a loop using the default retry options. Other
// errors sending the request are retried indefinitely using the same client
// command ID to avoid reporting failure when in fact the command may have gone
// through and been executed successfully. We retry here to eventually get
// through with the same client command ID and be given the cached response.
func (s *rpcSender) Send(_ context.Context, call proto.Call) {
	method := fmt.Sprintf("Server.%s", call.Args.Method())
	var err error
	for r := retry.Start(s.retryOpts); r.Next(); {
		select {
		case <-s.client.Healthy():
		default:
			err = fmt.Errorf("failed to send RPC request %s: client is unhealthy", method)
			log.Warning(err)
			continue
		}
		if err = s.client.Call(method, call.Args, call.Reply); err != nil {
			// Assume all errors sending request are retryable. The actual
			// number of things that could go wrong is vast, but we don't
			// want to miss any which should in theory be retried with the
			// same client command ID. We log the error here as a warning so
			// there's visibility that this is happening. Some of the errors
			// we'll sweep up in this net shouldn't be retried, but we can't
			// really know for sure which.
			log.Warningf("failed to send RPC request %s: %s", method, err)
			continue
		}
		// On successful post, we're done with retry loop.
		break
	}
	if err != nil {
		call.Reply.Header().SetGoError(err)
	}
}
Example 10: runPgbenchQueryParallel
// Tests a batch of queries very similar to those that PGBench runs
// in its TPC-B(ish) mode.
func runPgbenchQueryParallel(b *testing.B, db *sql.DB) {
	if err := pgbench.SetupBenchDB(db, 20000, true /*quiet*/); err != nil {
		b.Fatal(err)
	}
	retryOpts := retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     200 * time.Millisecond,
		Multiplier:     2,
	}
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		src := rand.New(rand.NewSource(5432))
		r := retry.Start(retryOpts)
		var err error
		for pb.Next() {
			r.Reset()
			for r.Next() {
				err = pgbench.RunOne(db, src, 20000)
				if err == nil {
					break
				}
			}
			if err != nil {
				b.Fatal(err)
			}
		}
	})
	b.StopTimer()
}
Example 11: get
// get performs an HTTPS GET to the specified path for a specific node.
func get(t *testing.T, client *http.Client, node *localcluster.Container, path string) []byte {
	url := fmt.Sprintf("https://%s%s", node.Addr(""), path)
	// TODO(bram) #2059: Remove retry logic.
	for r := retry.Start(retryOptions); r.Next(); {
		resp, err := client.Get(url)
		if err != nil {
			t.Logf("could not GET %s - %s", url, err)
			continue
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			t.Logf("could not read body for %s - %s", url, err)
			continue
		}
		if resp.StatusCode != http.StatusOK {
			t.Logf("could not GET %s - statuscode: %d - body: %s", url, resp.StatusCode, body)
			continue
		}
		t.Logf("OK response from %s", url)
		return body
	}
	t.Fatalf("There was an error retrieving %s", url)
	return []byte("")
}
Example 12: exec
func (txn *Txn) exec(retryable func(txn *Txn) error) (err error) {
	// Run retryable in a retry loop until we encounter a success or
	// error condition this loop isn't capable of handling.
	for r := retry.Start(txn.db.txnRetryOptions); r.Next(); {
		txn.haveTxnWrite, txn.haveEndTxn = false, false // always reset before [re]starting txn
		if err = retryable(txn); err == nil {
			if !txn.haveEndTxn && txn.haveTxnWrite {
				// If there were no errors running retryable, commit the txn. This
				// may block waiting for outstanding writes to complete in case
				// retryable didn't -- we need the most recent of all response
				// timestamps in order to commit.
				err = txn.Commit()
			}
		}
		if restartErr, ok := err.(proto.TransactionRestartError); ok {
			if log.V(2) {
				log.Warning(err)
			}
			if restartErr.CanRestartTransaction() == proto.TransactionRestart_IMMEDIATE {
				r.Reset()
				continue
			} else if restartErr.CanRestartTransaction() == proto.TransactionRestart_BACKOFF {
				continue
			}
			// By default, fall through and break.
		}
		break
	}
	if err != nil && txn.haveTxnWrite {
		if replyErr := txn.Rollback(); replyErr != nil {
			log.Errorf("failure aborting transaction: %s; abort caused by: %s", replyErr, err)
		}
	}
	return
}
Example 13: GetSnapshot
// GetSnapshot wraps Snapshot() but does not require the replica lock
// to be held and it will block instead of returning
// ErrSnapshotTemporarilyUnavailable.
func (r *Replica) GetSnapshot(ctx context.Context) (raftpb.Snapshot, error) {
	retryOptions := retry.Options{
		InitialBackoff: 1 * time.Millisecond,
		MaxBackoff:     50 * time.Millisecond,
		Multiplier:     2,
		Closer:         r.store.Stopper().ShouldQuiesce(),
	}
	for retry := retry.Start(retryOptions); retry.Next(); {
		log.Tracef(ctx, "snapshot retry loop pass %d", retry.CurrentAttempt())
		r.mu.Lock()
		snap, err := r.SnapshotWithContext(ctx)
		snapshotChan := r.mu.snapshotChan
		r.mu.Unlock()
		if err == raft.ErrSnapshotTemporarilyUnavailable {
			if snapshotChan == nil {
				// The call to Snapshot() didn't start an async process due to
				// rate limiting. Try again later.
				continue
			}
			var ok bool
			snap, ok = <-snapshotChan
			if ok {
				return snap, nil
			}
			// Each snapshot worker's output can only be consumed once.
			// We could be racing with raft itself, so if we get a closed
			// channel loop back and try again.
		} else {
			return snap, err
		}
	}
	return raftpb.Snapshot{}, &roachpb.NodeUnavailableError{}
}
Example 14: exec
func (txn *Txn) exec(retryable func(txn *Txn) *roachpb.Error) *roachpb.Error {
	// Run retryable in a retry loop until we encounter a success or
	// error condition this loop isn't capable of handling.
	var pErr *roachpb.Error
	for r := retry.Start(txn.db.txnRetryOptions); r.Next(); {
		pErr = retryable(txn)
		if pErr == nil && txn.Proto.Status == roachpb.PENDING {
			// retryable succeeded, but didn't commit.
			pErr = txn.commit(nil)
		}
		if pErr != nil {
			switch pErr.TransactionRestart {
			case roachpb.TransactionRestart_IMMEDIATE:
				if log.V(2) {
					log.Warning(pErr)
				}
				r.Reset()
				continue
			case roachpb.TransactionRestart_BACKOFF:
				if log.V(2) {
					log.Warning(pErr)
				}
				continue
			}
			// By default, fall through and break.
		}
		break
	}
	txn.Cleanup(pErr)
	return pErr
}
Example 15: get
// get performs an HTTPS GET to the specified path for a specific node.
func get(t *testing.T, base, rel string) []byte {
	// TODO(bram) #2059: Remove retry logic.
	url := fmt.Sprintf("%s/%s", base, rel)
	for r := retry.Start(retryOptions); r.Next(); {
		resp, err := cluster.HTTPClient.Get(url)
		if err != nil {
			log.Infof("could not GET %s - %s", url, err)
			continue
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Infof("could not read body for %s - %s", url, err)
			continue
		}
		if resp.StatusCode != http.StatusOK {
			log.Infof("could not GET %s - statuscode: %d - body: %s", url, resp.StatusCode, body)
			continue
		}
		if log.V(1) {
			log.Infof("OK response from %s", url)
		}
		return body
	}
	t.Fatalf("There was an error retrieving %s", url)
	return []byte("")
}