This page collects typical usage examples of the Go function github.com/cockroachdb/cockroach/util/timeutil.Since. If you are wondering what the Since function does, how to call it, or what it looks like in real code, the hand-picked examples here should help.
Fifteen code examples of the Since function are shown below, taken from the CockroachDB source tree and ordered roughly by popularity.
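Before diving into the examples, here is a minimal, self-contained sketch of the usual pattern. It is not taken from CockroachDB itself; it only assumes what the examples below already demonstrate, namely that timeutil.Now returns a time.Time and timeutil.Since returns a time.Duration, and that the import path above resolves in your source tree.

package main

import (
	"log"
	"time"

	"github.com/cockroachdb/cockroach/util/timeutil"
)

func main() {
	// Record a start time, do some work, then measure how long it took.
	start := timeutil.Now()
	time.Sleep(50 * time.Millisecond) // stand-in for real work
	elapsed := timeutil.Since(start)  // time.Duration elapsed since start
	log.Printf("work took %s (%.3f seconds)", elapsed, elapsed.Seconds())
}

The examples that follow use this same pattern inside CockroachDB, typically to log how long an operation took or to compare the elapsed time against a threshold.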
Example 1: dumpTrace
func (ts *txnState) dumpTrace() {
	if traceSQL && ts.txn != nil {
		ts.sp.Finish()
		if timeutil.Since(ts.sqlTimestamp) >= traceSQLDuration {
			dump := tracing.FormatRawSpans(ts.txn.CollectedSpans)
			if len(dump) > 0 {
				log.Infof(context.Background(), "%s\n%s", ts.txn.Proto.ID, dump)
			}
		}
	}
	ts.sp = nil
}
Example 2: clientStatus
func (g *Gossip) clientStatus() string {
	var buf bytes.Buffer
	g.mu.Lock()
	defer g.mu.Unlock()
	g.clientsMu.Lock()
	defer g.clientsMu.Unlock()
	fmt.Fprintf(&buf, "gossip client (%d/%d cur/max conns)\n", len(g.clientsMu.clients), g.outgoing.maxSize)
	for _, c := range g.clientsMu.clients {
		fmt.Fprintf(&buf, " %d: %s (%s: %d/%d sent/received)\n",
			c.peerID, c.addr, roundSecs(timeutil.Since(c.createdAt)), c.sent, c.received)
	}
	return buf.String()
}
Example 3: TestSucceedsSoon
func TestSucceedsSoon(t *testing.T) {
	// Try a method which always succeeds.
	SucceedsSoon(t, func() error { return nil })
	// Try a method which succeeds after a known duration.
	start := timeutil.Now()
	duration := time.Millisecond * 10
	SucceedsSoon(t, func() error {
		elapsed := timeutil.Since(start)
		if elapsed > duration {
			return nil
		}
		return errors.Errorf("%s elapsed, waiting until %s elapses", elapsed, duration)
	})
}
Example 4: waitForFullReplication
func (c *cluster) waitForFullReplication() {
	for i := 1; true; i++ {
		done, detail := c.isReplicated()
		if (done && i >= 50) || (i%50) == 0 {
			fmt.Print(detail)
			log.Infof(context.Background(), "waiting for replication")
		}
		if done {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	log.Infof(context.Background(), "replicated %.3fs", timeutil.Since(c.started).Seconds())
}
Example 5: status
func (s *server) status() string {
	s.mu.Lock()
	defer s.mu.Unlock()
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "gossip server (%d/%d cur/max conns, %s)\n",
		s.incoming.gauge.Value(), s.incoming.maxSize, s.serverMetrics)
	for addr, info := range s.nodeMap {
		// TODO(peter): Report per connection sent/received statistics. The
		// structure of server.Gossip and server.gossipReceiver makes this
		// irritating to track.
		fmt.Fprintf(&buf, " %d: %s (%s)\n",
			info.peerID, addr.AddressField, roundSecs(timeutil.Since(info.createdAt)))
	}
	return buf.String()
}
Example 6: testPutInner
func testPutInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	db, dbStopper := c.NewClient(t, 0)
	defer dbStopper.Stop()
	errs := make(chan error, c.NumNodes())
	start := timeutil.Now()
	deadline := start.Add(cfg.Duration)
	var count int64
	for i := 0; i < c.NumNodes(); i++ {
		go func() {
			r, _ := randutil.NewPseudoRand()
			value := randutil.RandBytes(r, 8192)
			for timeutil.Now().Before(deadline) {
				k := atomic.AddInt64(&count, 1)
				v := value[:r.Intn(len(value))]
				if err := db.Put(fmt.Sprintf("%08d", k), v); err != nil {
					errs <- err
					return
				}
			}
			errs <- nil
		}()
	}
	for i := 0; i < c.NumNodes(); {
		baseCount := atomic.LoadInt64(&count)
		select {
		case <-stopper:
			t.Fatalf("interrupted")
		case err := <-errs:
			if err != nil {
				t.Fatal(err)
			}
			i++
		case <-time.After(1 * time.Second):
			// Periodically print out progress so that we know the test is still
			// running.
			loadedCount := atomic.LoadInt64(&count)
			log.Infof(context.Background(), "%d (%d/s)", loadedCount, loadedCount-baseCount)
			c.Assert(t)
			cluster.Consistent(t, c)
		}
	}
	elapsed := timeutil.Since(start)
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 7: testClusterRecoveryInner
func testClusterRecoveryInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	// One client for each node.
	initBank(t, c.PGUrl(0))
	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, num),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, num),
	}
	for i := 0; i < num; i++ {
		state.clients[i].Lock()
		state.initClient(t, c, i)
		state.clients[i].Unlock()
		go transferMoneyLoop(i, &state, *numAccounts, *maxTransfer)
	}
	defer func() {
		<-state.teardown
	}()
	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf(context.Background(), "monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return rnd.Perm(num)[:rnd.Intn(num)+1]
	}
	go chaosMonkey(&state, c, true, pickNodes)
	waitClientsStop(num, &state, stall)
	// Verify accounts.
	verifyAccounts(t, &state.clients[0])
	elapsed := timeutil.Since(start)
	var count uint64
	counts := state.counts()
	for _, c := range counts {
		count += c
	}
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 8: processReplica
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock should not be held
// while calling this method.
func (bq *baseQueue) processReplica(repl *Replica, clock *hlc.Clock) error {
	bq.processMu.Lock()
	defer bq.processMu.Unlock()
	// Load the system config.
	cfg, ok := bq.gossip.GetSystemConfig()
	if !ok {
		log.VEventf(1, bq.ctx, "no system config available. skipping")
		return nil
	}
	if bq.requiresSplit(cfg, repl) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		log.VEventf(3, bq.ctx, "%s: split needed; skipping", repl)
		return nil
	}
	sp := repl.store.Tracer().StartSpan(bq.name)
	ctx := opentracing.ContextWithSpan(context.Background(), sp)
	defer sp.Finish()
	log.Tracef(ctx, "processing replica %s", repl)
	// If the queue requires a replica to have the range lease in
	// order to be processed, check whether this replica has range lease
	// and renew or acquire if necessary.
	if bq.needsLease {
		// Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
		if err := repl.redirectOnOrAcquireLease(ctx); err != nil {
			if _, harmless := err.GetDetail().(*roachpb.NotLeaseHolderError); harmless {
				log.VEventf(3, bq.ctx, "%s: not holding lease; skipping", repl)
				return nil
			}
			return errors.Wrapf(err.GoError(), "%s: could not obtain lease", repl)
		}
		log.Trace(ctx, "got range lease")
	}
	log.VEventf(3, bq.ctx, "%s: processing", repl)
	start := timeutil.Now()
	if err := bq.impl.process(ctx, clock.Now(), repl, cfg); err != nil {
		return err
	}
	log.VEventf(2, bq.ctx, "%s: done: %s", repl, timeutil.Since(start))
	log.Trace(ctx, "done")
	return nil
}
Example 9: start
func (c *cluster) start(db string, args []string) {
	c.started = timeutil.Now()
	baseCtx := &base.Context{
		User:     security.NodeUser,
		Insecure: true,
	}
	c.rpcCtx = rpc.NewContext(baseCtx, nil, c.stopper)
	for i := range c.nodes {
		c.nodes[i] = c.makeNode(i, args)
		c.clients[i] = c.makeClient(i)
		c.db[i] = c.makeDB(i, db)
	}
	log.Infof(context.Background(), "started %.3fs", timeutil.Since(c.started).Seconds())
}
Example 10: scanLoop
// scanLoop loops endlessly, scanning through replicas available via
// the replica set, or until the scanner is stopped. The iteration
// is paced to complete a full scan in approximately the scan interval.
func (rs *replicaScanner) scanLoop(clock *hlc.Clock, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		start := timeutil.Now()
		// waitTimer is reset in each call to waitAndProcess.
		defer rs.waitTimer.Stop()
		for {
			if rs.GetDisabled() {
				if done := rs.waitEnabled(stopper); done {
					return
				}
				continue
			}
			var shouldStop bool
			count := 0
			rs.replicas.Visit(func(repl *Replica) bool {
				count++
				shouldStop = rs.waitAndProcess(start, clock, stopper, repl)
				return !shouldStop
			})
			if count == 0 {
				// No replicas processed, just wait.
				shouldStop = rs.waitAndProcess(start, clock, stopper, nil)
			}
			shouldStop = shouldStop || nil != stopper.RunTask(func() {
				// Increment iteration count.
				rs.mu.Lock()
				defer rs.mu.Unlock()
				rs.mu.scanCount++
				rs.mu.total += timeutil.Since(start)
				if log.V(6) {
					log.Infof(context.TODO(), "reset replica scan iteration")
				}
				// Reset iteration and start time.
				start = timeutil.Now()
			})
			if shouldStop {
				return
			}
		}
	})
}
Example 11: testNodeRestartInner
func testNodeRestartInner(t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
	num := c.NumNodes()
	if minNum := 3; num < minNum {
		t.Skipf("need at least %d nodes, got %d", minNum, num)
	}
	// One client for each node.
	initBank(t, c.PGUrl(0))
	start := timeutil.Now()
	state := testState{
		t:        t,
		errChan:  make(chan error, 1),
		teardown: make(chan struct{}),
		deadline: start.Add(cfg.Duration),
		clients:  make([]testClient, 1),
	}
	client := &state.clients[0]
	client.Lock()
	client.db = makePGClient(t, c.PGUrl(num-1))
	client.Unlock()
	go transferMoneyLoop(0, &state, *numAccounts, *maxTransfer)
	defer func() {
		<-state.teardown
	}()
	// Chaos monkey.
	rnd, seed := randutil.NewPseudoRand()
	log.Warningf(context.Background(), "monkey starts (seed %d)", seed)
	pickNodes := func() []int {
		return []int{rnd.Intn(num - 1)}
	}
	go chaosMonkey(&state, c, false, pickNodes)
	waitClientsStop(1, &state, stall)
	// Verify accounts.
	verifyAccounts(t, client)
	elapsed := timeutil.Since(start)
	count := atomic.LoadUint64(&client.count)
	log.Infof(context.Background(), "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
Example 12: processReplica
// processReplica processes a single replica. This should not be
// called externally to the queue. bq.mu.Lock should not be held
// while calling this method.
func (bq *baseQueue) processReplica(repl *Replica, clock *hlc.Clock) error {
	// Load the system config.
	cfg, ok := bq.gossip.GetSystemConfig()
	if !ok {
		bq.eventLog.VInfof(log.V(1), "no system config available. skipping")
		return nil
	}
	desc := repl.Desc()
	if !bq.acceptsUnsplitRanges && cfg.NeedsSplit(desc.StartKey, desc.EndKey) {
		// Range needs to be split due to zone configs, but queue does
		// not accept unsplit ranges.
		bq.eventLog.VInfof(log.V(3), "%s: split needed; skipping", repl)
		return nil
	}
	sp := repl.store.Tracer().StartSpan(bq.name)
	ctx := opentracing.ContextWithSpan(repl.context(context.Background()), sp)
	log.Trace(ctx, fmt.Sprintf("queue start for range %d", repl.RangeID))
	defer sp.Finish()
	// If the queue requires a replica to have the range leader lease in
	// order to be processed, check whether this replica has leader lease
	// and renew or acquire if necessary.
	if bq.needsLeaderLease {
		// Create a "fake" get request in order to invoke redirectOnOrAcquireLease.
		if err := repl.redirectOnOrAcquireLeaderLease(ctx); err != nil {
			if _, harmless := err.GetDetail().(*roachpb.NotLeaderError); harmless {
				bq.eventLog.VInfof(log.V(3), "%s: not holding lease; skipping", repl)
				return nil
			}
			return errors.Wrapf(err.GoError(), "%s: could not obtain lease", repl)
		}
		log.Trace(ctx, "got range lease")
	}
	bq.eventLog.VInfof(log.V(3), "%s: processing", repl)
	start := timeutil.Now()
	if err := bq.impl.process(ctx, clock.Now(), repl, cfg); err != nil {
		return err
	}
	bq.eventLog.VInfof(log.V(2), "%s: done: %s", repl, timeutil.Since(start))
	log.Trace(ctx, "done")
	return nil
}
Example 13: scanLoop
// scanLoop loops endlessly, scanning through replicas available via
// the replica set, or until the scanner is stopped. The iteration
// is paced to complete a full scan in approximately the scan interval.
func (rs *replicaScanner) scanLoop(clock *hlc.Clock, stopper *stop.Stopper) {
	stopper.RunWorker(func() {
		start := timeutil.Now()
		// waitTimer is reset in each call to waitAndProcess.
		defer rs.waitTimer.Stop()
		for {
			var shouldStop bool
			count := 0
			rs.replicas.Visit(func(repl *Replica) bool {
				count++
				shouldStop = rs.waitAndProcess(start, clock, stopper, repl)
				return !shouldStop
			})
			if count == 0 {
				// No replicas processed, just wait.
				shouldStop = rs.waitAndProcess(start, clock, stopper, nil)
			}
			shouldStop = shouldStop || !stopper.RunTask(func() {
				// Increment iteration count.
				rs.completedScan.L.Lock()
				rs.count++
				rs.total += timeutil.Since(start)
				rs.completedScan.Broadcast()
				rs.completedScan.L.Unlock()
				if log.V(6) {
					log.Infof("reset replica scan iteration")
				}
				// Reset iteration and start time.
				start = timeutil.Now()
			})
			if shouldStop {
				return
			}
		}
	})
}
Example 14: PeriodicallyCheckForUpdates
// PeriodicallyCheckForUpdates starts a background worker that periodically
// phones home to check for updates and report usage.
func (s *Server) PeriodicallyCheckForUpdates() {
	s.stopper.RunWorker(func() {
		startup := timeutil.Now()
		for {
			// `maybeCheckForUpdates` and `maybeReportUsage` both return the
			// duration until they should next be checked.
			// Wait for the shorter of the durations returned by the two checks.
			wait := s.maybeCheckForUpdates()
			if reportWait := s.maybeReportUsage(timeutil.Since(startup)); reportWait < wait {
				wait = reportWait
			}
			jitter := rand.Intn(updateCheckJitterSeconds) - updateCheckJitterSeconds/2
			wait = wait + (time.Duration(jitter) * time.Second)
			select {
			case <-s.stopper.ShouldQuiesce():
				return
			case <-time.After(wait):
			}
		}
	})
}
Example 15: Start
//.........part of the code omitted here.........
						continue
					}
					// Attempt to unmarshal config into a table/database descriptor.
					var descriptor sqlbase.Descriptor
					if err := kv.Value.GetProto(&descriptor); err != nil {
						log.Warningf("%s: unable to unmarshal descriptor %v", kv.Key, kv.Value)
						continue
					}
					switch union := descriptor.Union.(type) {
					case *sqlbase.Descriptor_Table:
						table := union.Table
						if err := table.Validate(); err != nil {
							log.Errorf("%s: received invalid table descriptor: %v", kv.Key, table)
							continue
						}
						// Keep track of outstanding schema changes.
						// If all schema change commands always set UpVersion, why
						// check for the presence of mutations?
						// A schema change execution might fail soon after
						// unsetting UpVersion, and we still want to process
						// outstanding mutations. Similar with a table marked for deletion.
						if table.UpVersion || table.Deleted() ||
							table.Renamed() || len(table.Mutations) > 0 {
							if log.V(2) {
								log.Infof("%s: queue up pending schema change; table: %d, version: %d",
									kv.Key, table.ID, table.Version)
							}
							// Only track the first schema change. We depend on
							// gossip to renotify us when a schema change has been
							// completed.
							schemaChanger.tableID = table.ID
							if len(table.Mutations) == 0 {
								schemaChanger.mutationID = sqlbase.InvalidMutationID
							} else {
								schemaChanger.mutationID = table.Mutations[0].MutationID
							}
							schemaChanger.execAfter = execAfter
							// Keep track of this schema change.
							// Remove from oldSchemaChangers map.
							delete(oldSchemaChangers, table.ID)
							if sc, ok := s.schemaChangers[table.ID]; ok {
								if sc.mutationID == schemaChanger.mutationID {
									// Ignore duplicate.
									continue
								}
							}
							s.schemaChangers[table.ID] = schemaChanger
						}
					case *sqlbase.Descriptor_Database:
						// Ignore.
					}
				}
				// Delete old schema changers.
				for k := range oldSchemaChangers {
					delete(s.schemaChangers, k)
				}
				timer = s.newTimer()
			case <-timer.C:
				if s.testingKnobs.AsyncSchemaChangerExecNotification != nil &&
					s.testingKnobs.AsyncSchemaChangerExecNotification() != nil {
					timer = s.newTimer()
					continue
				}
				for tableID, sc := range s.schemaChangers {
					if timeutil.Since(sc.execAfter) > 0 {
						err := sc.exec(nil, nil)
						if err != nil {
							if err == errExistingSchemaChangeLease {
							} else if err == errDescriptorNotFound {
								// Someone deleted this table. Don't try to run the schema
								// changer again. Note that there's no gossip update for the
								// deletion which would remove this schemaChanger.
								delete(s.schemaChangers, tableID)
							} else {
								// We don't need to act on integrity
								// constraints violations because exec()
								// purges mutations that violate integrity
								// constraints.
								log.Warningf("Error executing schema change: %s", err)
							}
						}
						// Advance the execAfter time so that this schema
						// changer doesn't get called again for a while.
						sc.execAfter = timeutil.Now().Add(delay)
					}
					// Only attempt to run one schema changer.
					break
				}
				timer = s.newTimer()
			case <-stopper.ShouldStop():
				return
			}
		}
	})
}