This article collects typical usage examples of the Since function from the Go package github.com/cockroachdb/cockroach/pkg/util/timeutil. If you are wondering what Since does, how to call it, or what real-world uses of Since look like, the hand-picked examples below may help.
A total of 15 code examples of the Since function are shown below, sorted by popularity by default.
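Before the extracted examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: record a start time with timeutil.Now and compute the elapsed duration with timeutil.Since. The measureWork helper and the sleep inside main are purely illustrative and are not part of the CockroachDB code base.

package main

import (
    "fmt"
    "time"

    "github.com/cockroachdb/cockroach/pkg/util/timeutil"
)

// measureWork is a hypothetical helper that reports how long an arbitrary
// piece of work took, using timeutil.Now and timeutil.Since the same way the
// examples below do.
func measureWork(work func()) time.Duration {
    start := timeutil.Now()
    work()
    return timeutil.Since(start)
}

func main() {
    elapsed := measureWork(func() {
        time.Sleep(10 * time.Millisecond) // stand-in for real work
    })
    fmt.Printf("work took %s\n", elapsed)
}

Like the standard library's time.Since, timeutil.Since(start) returns a time.Duration measured from start to now; the examples below feed that duration into metrics, log messages, and timeout checks.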
Example 1: processReplica
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock must not be held
// while calling this method.
func (bq *baseQueue) processReplica(
    queueCtx context.Context, repl *Replica, clock *hlc.Clock,
) error {
    bq.processMu.Lock()
    defer bq.processMu.Unlock()

    // Load the system config.
    cfg, ok := bq.gossip.GetSystemConfig()
    if !ok {
        log.VEventf(queueCtx, 1, "no system config available, skipping")
        return nil
    }

    if bq.requiresSplit(cfg, repl) {
        // Range needs to be split due to zone configs, but queue does
        // not accept unsplit ranges.
        log.VEventf(queueCtx, 3, "%s: split needed; skipping", repl)
        return nil
    }

    // Putting a span in a context means that events will no longer go to the
    // event log. Use queueCtx for events that are intended for the event log.
    ctx, span := bq.AnnotateCtxWithSpan(queueCtx, bq.name)
    defer span.Finish()
    // Also add the Replica annotations to ctx.
    ctx = repl.AnnotateCtx(ctx)
    ctx, cancel := context.WithTimeout(ctx, bq.processTimeout)
    defer cancel()
    log.Eventf(ctx, "processing replica")

    // If the queue requires a replica to have the range lease in
    // order to be processed, check whether this replica has range lease
    // and renew or acquire if necessary.
    if bq.needsLease {
        // Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
        if err := repl.redirectOnOrAcquireLease(ctx); err != nil {
            if _, harmless := err.GetDetail().(*roachpb.NotLeaseHolderError); harmless {
                log.VEventf(queueCtx, 3, "not holding lease; skipping")
                return nil
            }
            return errors.Wrapf(err.GoError(), "%s: could not obtain lease", repl)
        }
        log.Event(ctx, "got range lease")
    }

    log.VEventf(queueCtx, 3, "processing")
    start := timeutil.Now()
    err := bq.impl.process(ctx, clock.Now(), repl, cfg)
    duration := timeutil.Since(start)
    bq.processingNanos.Inc(duration.Nanoseconds())
    if err != nil {
        return err
    }
    log.VEventf(queueCtx, 2, "done: %s", duration)
    log.Event(ctx, "done")
    bq.successes.Inc(1)
    return nil
}
Example 2: testDecimalSingleArgFunc
func testDecimalSingleArgFunc(
    t *testing.T,
    f func(*inf.Dec, *inf.Dec, inf.Scale) (*inf.Dec, error),
    s inf.Scale,
    tests []decimalOneArgTestCase,
) {
    for _, tc := range tests {
        t.Run(fmt.Sprintf("%s = %s", tc.input, tc.expected), func(t *testing.T) {
            x, exp := new(inf.Dec), new(inf.Dec)
            x.SetString(tc.input)
            exp.SetString(tc.expected)

            // Test allocated return value.
            var z *inf.Dec
            var err error
            done := make(chan struct{}, 1)
            start := timeutil.Now()
            go func() {
                z, err = f(nil, x, s)
                done <- struct{}{}
            }()
            var after <-chan time.Time
            if *flagDurationLimit > 0 {
                after = time.After(*flagDurationLimit)
            }
            select {
            case <-done:
                t.Logf("execute duration: %s", timeutil.Since(start))
            case <-after:
                t.Fatalf("timed out after %s", *flagDurationLimit)
            }
            if err != nil {
                if tc.expected != err.Error() {
                    t.Errorf("expected error %s, got %s", tc.expected, err)
                }
                return
            }
            if exp.Cmp(z) != 0 {
                t.Errorf("expected %s, got %s", exp, z)
            }

            // Test provided decimal mutation.
            z.SetString("0.0")
            _, _ = f(z, x, s)
            if exp.Cmp(z) != 0 {
                t.Errorf("expected %s, got %s", exp, z)
            }

            // Test same arg mutation.
            _, _ = f(x, x, s)
            if exp.Cmp(x) != 0 {
                t.Errorf("expected %s, got %s", exp, x)
            }
            x.SetString(tc.input)
        })
    }
}
Example 3: testPutInner
func testPutInner(ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
    db, err := c.NewClient(ctx, 0)
    if err != nil {
        t.Fatal(err)
    }

    errs := make(chan error, c.NumNodes())
    start := timeutil.Now()
    deadline := start.Add(cfg.Duration)
    var count int64
    for i := 0; i < c.NumNodes(); i++ {
        go func() {
            r, _ := randutil.NewPseudoRand()
            value := randutil.RandBytes(r, 8192)
            for timeutil.Now().Before(deadline) {
                k := atomic.AddInt64(&count, 1)
                v := value[:r.Intn(len(value))]
                if err := db.Put(ctx, fmt.Sprintf("%08d", k), v); err != nil {
                    errs <- err
                    return
                }
            }
            errs <- nil
        }()
    }

    for i := 0; i < c.NumNodes(); {
        baseCount := atomic.LoadInt64(&count)
        select {
        case <-stopper.ShouldStop():
            t.Fatalf("interrupted")
        case err := <-errs:
            if err != nil {
                t.Fatal(err)
            }
            i++
        case <-time.After(1 * time.Second):
            // Periodically print out progress so that we know the test is still
            // running.
            loadedCount := atomic.LoadInt64(&count)
            log.Infof(ctx, "%d (%d/s)", loadedCount, loadedCount-baseCount)
            c.Assert(ctx, t)
            if err := cluster.Consistent(ctx, c, 0); err != nil {
                t.Fatal(err)
            }
        }
    }

    elapsed := timeutil.Since(start)
    log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 4: testClusterRecoveryInner
func testClusterRecoveryInner(
    ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
    num := c.NumNodes()

    // One client for each node.
    initBank(t, c.PGUrl(ctx, 0))

    start := timeutil.Now()
    state := testState{
        t:        t,
        errChan:  make(chan error, num),
        teardown: make(chan struct{}),
        deadline: start.Add(cfg.Duration),
        clients:  make([]testClient, num),
    }

    for i := 0; i < num; i++ {
        state.clients[i].Lock()
        state.initClient(ctx, t, c, i)
        state.clients[i].Unlock()
        go transferMoneyLoop(ctx, i, &state, *numAccounts, *maxTransfer)
    }

    defer func() {
        <-state.teardown
    }()

    // Chaos monkey.
    rnd, seed := randutil.NewPseudoRand()
    log.Warningf(ctx, "monkey starts (seed %d)", seed)
    pickNodes := func() []int {
        return rnd.Perm(num)[:rnd.Intn(num)+1]
    }
    go chaosMonkey(ctx, &state, c, true, pickNodes, 0)

    waitClientsStop(ctx, num, &state, stall)

    // Verify accounts.
    verifyAccounts(t, &state.clients[0])

    elapsed := timeutil.Since(start)
    var count uint64
    counts := state.counts()
    for _, c := range counts {
        count += c
    }
    log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 5: TestSucceedsSoon
func TestSucceedsSoon(t *testing.T) {
    // Try a method which always succeeds.
    SucceedsSoon(t, func() error { return nil })

    // Try a method which succeeds after a known duration.
    start := timeutil.Now()
    duration := time.Millisecond * 10
    SucceedsSoon(t, func() error {
        elapsed := timeutil.Since(start)
        if elapsed > duration {
            return nil
        }
        return errors.Errorf("%s elapsed, waiting until %s elapses", elapsed, duration)
    })
}
Example 6: waitForFullReplication
// waitForFullReplication waits for the cluster to be fully replicated.
func (c *Cluster) waitForFullReplication() {
    for i := 1; true; i++ {
        done, detail := c.isReplicated()
        if (done && i >= 50) || (i%50) == 0 {
            fmt.Print(detail)
            log.Infof(context.Background(), "waiting for replication")
        }
        if done {
            break
        }
        time.Sleep(100 * time.Millisecond)
    }
    log.Infof(context.Background(), "replicated %.3fs", timeutil.Since(c.started).Seconds())
}
Example 7: clientStatus
func (g *Gossip) clientStatus() string {
    var buf bytes.Buffer

    g.mu.Lock()
    defer g.mu.Unlock()
    g.clientsMu.Lock()
    defer g.clientsMu.Unlock()

    fmt.Fprintf(&buf, "gossip client (%d/%d cur/max conns)\n", len(g.clientsMu.clients), g.outgoing.maxSize)
    for _, c := range g.clientsMu.clients {
        fmt.Fprintf(&buf, " %d: %s (%s: %s)\n",
            c.peerID, c.addr, roundSecs(timeutil.Since(c.createdAt)), c.clientMetrics)
    }
    return buf.String()
}
Example 8: testNodeRestartInner
func testNodeRestartInner(
    ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
    num := c.NumNodes()
    if minNum := 3; num < minNum {
        t.Skipf("need at least %d nodes, got %d", minNum, num)
    }

    // One client for each node.
    initBank(t, c.PGUrl(ctx, 0))

    start := timeutil.Now()
    state := testState{
        t:        t,
        errChan:  make(chan error, 1),
        teardown: make(chan struct{}),
        deadline: start.Add(cfg.Duration),
        clients:  make([]testClient, 1),
    }
    clientIdx := num - 1
    client := &state.clients[0]
    client.Lock()
    client.db = makePGClient(t, c.PGUrl(ctx, clientIdx))
    client.Unlock()
    go transferMoneyLoop(ctx, 0, &state, *numAccounts, *maxTransfer)

    defer func() {
        <-state.teardown
    }()

    // Chaos monkey.
    rnd, seed := randutil.NewPseudoRand()
    log.Warningf(ctx, "monkey starts (seed %d)", seed)
    pickNodes := func() []int {
        return []int{rnd.Intn(clientIdx)}
    }
    go chaosMonkey(ctx, &state, c, false, pickNodes, clientIdx)

    waitClientsStop(ctx, 1, &state, stall)

    // Verify accounts.
    verifyAccounts(t, client)

    elapsed := timeutil.Since(start)
    count := atomic.LoadUint64(&client.count)
    log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 9: finishSQLTxn
// finishSQLTxn closes the root span for the current SQL txn.
// This needs to be called before resetForNewSQLTransaction() is called for
// starting another SQL txn.
// The session context is just used for logging the SQL trace.
func (ts *txnState) finishSQLTxn(sessionCtx context.Context) {
    ts.mon.Stop(ts.Ctx)
    if ts.sp == nil {
        panic("No span in context? Was resetForNewSQLTxn() called previously?")
    }
    sampledFor7881 := (ts.sp.BaggageItem(keyFor7881Sample) != "")
    ts.sp.Finish()
    ts.sp = nil
    if (traceSQL && timeutil.Since(ts.sqlTimestamp) >= traceSQLDuration) ||
        (traceSQLFor7881 && sampledFor7881) {
        dump := tracing.FormatRawSpans(ts.CollectedSpans)
        if len(dump) > 0 {
            log.Infof(sessionCtx, "SQL trace:\n%s", dump)
        }
    }
}
Example 10: scanLoop
// scanLoop loops endlessly, scanning through replicas available via
// the replica set, or until the scanner is stopped. The iteration
// is paced to complete a full scan in approximately the scan interval.
func (rs *replicaScanner) scanLoop(clock *hlc.Clock, stopper *stop.Stopper) {
    stopper.RunWorker(func() {
        ctx := rs.AnnotateCtx(context.Background())
        start := timeutil.Now()

        // waitTimer is reset in each call to waitAndProcess.
        defer rs.waitTimer.Stop()

        for {
            if rs.GetDisabled() {
                if done := rs.waitEnabled(stopper); done {
                    return
                }
                continue
            }
            var shouldStop bool
            count := 0
            rs.replicas.Visit(func(repl *Replica) bool {
                count++
                shouldStop = rs.waitAndProcess(ctx, start, clock, stopper, repl)
                return !shouldStop
            })
            if count == 0 {
                // No replicas processed, just wait.
                shouldStop = rs.waitAndProcess(ctx, start, clock, stopper, nil)
            }

            shouldStop = shouldStop || nil != stopper.RunTask(func() {
                // Increment iteration count.
                rs.mu.Lock()
                defer rs.mu.Unlock()
                rs.mu.scanCount++
                rs.mu.total += timeutil.Since(start)
                if log.V(6) {
                    log.Infof(ctx, "reset replica scan iteration")
                }

                // Reset iteration and start time.
                start = timeutil.Now()
            })
            if shouldStop {
                return
            }
        }
    })
}
Example 11: Start
// Start starts a cluster. The numWorkers parameter controls the SQL connection
// settings to avoid unnecessary connection creation. The args parameter can be
// used to pass extra arguments to each node.
func (c *Cluster) Start(db string, numWorkers int, args, env []string) {
    c.started = timeutil.Now()

    baseCtx := &base.Config{
        User:     security.NodeUser,
        Insecure: true,
    }
    c.rpcCtx = rpc.NewContext(log.AmbientContext{}, baseCtx, nil, c.stopper)

    for i := range c.Nodes {
        c.Nodes[i] = c.makeNode(i, args, env)
        c.Clients[i] = c.makeClient(i)
        c.Status[i] = c.makeStatus(i)
        c.DB[i] = c.makeDB(i, numWorkers, db)
    }

    log.Infof(context.Background(), "started %.3fs", timeutil.Since(c.started).Seconds())
    c.waitForFullReplication()
}
Example 12: PeriodicallyCheckForUpdates
// PeriodicallyCheckForUpdates starts a background worker that periodically
// phones home to check for updates and report usage.
func (s *Server) PeriodicallyCheckForUpdates() {
    s.stopper.RunWorker(func() {
        startup := timeutil.Now()

        for {
            // `maybeCheckForUpdates` and `maybeReportUsage` both return the
            // duration until they should next be checked.
            // Wait for the shorter of the durations returned by the two checks.
            wait := s.maybeCheckForUpdates()
            if reportWait := s.maybeReportUsage(timeutil.Since(startup)); reportWait < wait {
                wait = reportWait
            }

            jitter := rand.Intn(updateCheckJitterSeconds) - updateCheckJitterSeconds/2
            wait = wait + (time.Duration(jitter) * time.Second)

            select {
            case <-s.stopper.ShouldQuiesce():
                return
            case <-time.After(wait):
            }
        }
    })
}
Example 13: TestConcurrentBatch
func TestConcurrentBatch(t *testing.T) {
    defer leaktest.AfterTest(t)()

    dir, err := ioutil.TempDir("", "TestConcurrentBatch")
    if err != nil {
        t.Fatal(err)
    }
    defer func() {
        if err := os.RemoveAll(dir); err != nil {
            t.Fatal(err)
        }
    }()

    db, err := NewRocksDB(roachpb.Attributes{}, dir, RocksDBCache{},
        0, DefaultMaxOpenFiles)
    if err != nil {
        t.Fatalf("could not create new rocksdb db instance at %s: %v", dir, err)
    }
    defer db.Close()

    // Prepare 16 4 MB batches containing non-overlapping contents.
    var batches []Batch
    for i := 0; i < 16; i++ {
        batch := db.NewBatch()
        for j := 0; true; j++ {
            key := encoding.EncodeUvarintAscending([]byte("bar"), uint64(i))
            key = encoding.EncodeUvarintAscending(key, uint64(j))
            if err := batch.Put(MakeMVCCMetadataKey(key), nil); err != nil {
                t.Fatal(err)
            }
            if len(batch.Repr()) >= 4<<20 {
                break
            }
        }
        batches = append(batches, batch)
    }

    errChan := make(chan error, len(batches))

    // Concurrently write all the batches.
    for _, batch := range batches {
        go func(batch Batch) {
            errChan <- batch.Commit()
        }(batch)
    }

    // While the batch writes are in progress, try to write another key.
    time.Sleep(100 * time.Millisecond)
    remainingBatches := len(batches)
    for i := 0; remainingBatches > 0; i++ {
        select {
        case err := <-errChan:
            if err != nil {
                t.Fatal(err)
            }
            remainingBatches--
        default:
        }

        // This write can get delayed excessively if we hit the max memtable count
        // or the L0 stop writes threshold.
        start := timeutil.Now()
        key := encoding.EncodeUvarintAscending([]byte("foo"), uint64(i))
        if err := db.Put(MakeMVCCMetadataKey(key), nil); err != nil {
            t.Fatal(err)
        }
        if elapsed := timeutil.Since(start); elapsed >= 10*time.Second {
            t.Fatalf("write took %0.1fs\n", elapsed.Seconds())
        }
    }
}
Example 14: grpcTransportFactory
// grpcTransportFactory during race builds wraps the implementation and
// intercepts all BatchRequests, reading them in a tight loop. This allows the
// race detector to catch any mutations of a batch passed to the transport.
func grpcTransportFactory(
    opts SendOptions, rpcContext *rpc.Context, replicas ReplicaSlice, args roachpb.BatchRequest,
) (Transport, error) {
    if atomic.AddInt32(&running, 1) <= 1 {
        rpcContext.Stopper.RunWorker(func() {
            var iters int
            var curIdx int
            defer func() {
                atomic.StoreInt32(&running, 0)
                log.Infof(
                    context.TODO(),
                    "transport race promotion: ran %d iterations on up to %d requests",
                    iters, curIdx+1,
                )
            }()
            // Make a fixed-size slice of *BatchRequest. When full, entries
            // are evicted in FIFO order.
            const size = 1000
            bas := make([]*roachpb.BatchRequest, size)
            encoder := gob.NewEncoder(ioutil.Discard)
            for {
                iters++
                start := timeutil.Now()
                for _, ba := range bas {
                    if ba != nil {
                        if err := encoder.Encode(ba); err != nil {
                            panic(err)
                        }
                    }
                }
                // Prevent the goroutine from spinning too hot as this lets CI
                // times skyrocket. Sleep on average for as long as we worked
                // on the last iteration so we spend no more than half our CPU
                // time on this task.
                jittered := time.After(jitter(timeutil.Since(start)))
                // Collect incoming requests until the jittered timer fires,
                // then access everything we have.
                for {
                    select {
                    case <-rpcContext.Stopper.ShouldStop():
                        return
                    case ba := <-incoming:
                        bas[curIdx%size] = ba
                        curIdx++
                        continue
                    case <-jittered:
                    }
                    break
                }
            }
        })
    }
    select {
    // We have a shallow copy here and so the top level scalar fields can't
    // really race, but making more copies doesn't make anything more
    // transparent, so from now on we operate on a pointer.
    case incoming <- &args:
    default:
        // Avoid slowing down the tests if we're backed up.
    }
    return grpcTransportFactoryImpl(opts, rpcContext, replicas, args)
}
Example 15: TestSessionFinishRollsBackTxn
//......... part of the code omitted here .........
            if connClosed {
                return
            }
            if err := conn.Close(); err != nil {
                t.Fatal(err)
            }
        }()

        txn, err := conn.Begin()
        if err != nil {
            t.Fatal(err)
        }
        tx := txn.(driver.Execer)
        if _, err := tx.Exec("SET TRANSACTION PRIORITY NORMAL", nil); err != nil {
            t.Fatal(err)
        }

        if state == sql.RestartWait || state == sql.CommitWait {
            if _, err := tx.Exec("SAVEPOINT cockroach_restart", nil); err != nil {
                t.Fatal(err)
            }
        }

        insertStmt := "INSERT INTO t.test(k, v) VALUES (1, 'a')"
        if state == sql.RestartWait {
            // To get a txn in RestartWait, we'll use an aborter.
            if err := aborter.QueueStmtForAbortion(
                insertStmt, 1 /* restartCount */, false /* willBeRetriedIbid */); err != nil {
                t.Fatal(err)
            }
        }
        if _, err := tx.Exec(insertStmt, nil); err != nil {
            t.Fatal(err)
        }

        if err := aborter.VerifyAndClear(); err != nil {
            t.Fatal(err)
        }

        if state == sql.RestartWait || state == sql.CommitWait {
            _, err := tx.Exec("RELEASE SAVEPOINT cockroach_restart", nil)
            if state == sql.CommitWait {
                if err != nil {
                    t.Fatal(err)
                }
            } else if !testutils.IsError(err, "pq: restart transaction:.*") {
                t.Fatal(err)
            }
        }

        // Abruptly close the connection.
        connClosed = true
        if err := conn.Close(); err != nil {
            t.Fatal(err)
        }

        // Check that the txn we had above was rolled back. We do this by reading
        // after the preceding txn and checking that we don't get an error and
        // that we haven't been blocked by intents (we can't exactly test that we
        // haven't been blocked but we assert that the query didn't take too
        // long).
        // We do the read in an explicit txn so that automatic retries don't hide
        // any errors.
        // TODO(andrei): Figure out a better way to test for non-blocking.
        // Use a trace when the client-side tracing story gets good enough.
        txCheck, err := mainDB.Begin()
        if err != nil {
            t.Fatal(err)
        }
        // Run check at low priority so we don't push the previous transaction and
        // fool ourselves into thinking it had been rolled back.
        if _, err := txCheck.Exec("SET TRANSACTION PRIORITY LOW"); err != nil {
            t.Fatal(err)
        }
        ts := timeutil.Now()
        var count int
        if err := txCheck.QueryRow("SELECT COUNT(1) FROM t.test").Scan(&count); err != nil {
            t.Fatal(err)
        }
        // CommitWait actually committed, so we'll need to clean up.
        if state != sql.CommitWait {
            if count != 0 {
                t.Fatalf("expected no rows, got: %d", count)
            }
        } else {
            if _, err := txCheck.Exec("DELETE FROM t.test"); err != nil {
                t.Fatal(err)
            }
        }
        if err := txCheck.Commit(); err != nil {
            t.Fatal(err)
        }
        if d := timeutil.Since(ts); d > time.Second {
            t.Fatalf("Looks like the checking tx was unexpectedly blocked. "+
                "It took %s to commit.", d)
        }
    })
}
}