This article collects typical usage examples of the Golang Now function from github.com/cockroachdb/cockroach/pkg/util/timeutil. If you are wondering what Now does, how to use it, or what calling it looks like in practice, the curated examples below should help.
The following 15 code examples of the Now function are shown, sorted by popularity by default.
Example 1: runMVCCConditionalPut
func runMVCCConditionalPut(emk engineMaker, valueSize int, createFirst bool, b *testing.B) {
rng, _ := randutil.NewPseudoRand()
value := roachpb.MakeValueFromBytes(randutil.RandBytes(rng, valueSize))
keyBuf := append(make([]byte, 0, 64), []byte("key-")...)
eng := emk(b, fmt.Sprintf("cput_%d", valueSize))
defer eng.Close()
b.SetBytes(int64(valueSize))
var expected *roachpb.Value
if createFirst {
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := makeTS(timeutil.Now().UnixNano(), 0)
if err := MVCCPut(context.Background(), eng, nil, key, ts, value, nil); err != nil {
b.Fatalf("failed put: %s", err)
}
}
expected = &value
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
key := roachpb.Key(encoding.EncodeUvarintAscending(keyBuf[:4], uint64(i)))
ts := makeTS(timeutil.Now().UnixNano(), 0)
if err := MVCCConditionalPut(context.Background(), eng, nil, key, ts, value, expected, nil); err != nil {
b.Fatalf("failed put: %s", err)
}
}
b.StopTimer()
}
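The benchmark derives its MVCC timestamps from the wall clock via timeutil.Now().UnixNano(). Below is a minimal sketch of that construction, with a simplified Timestamp struct standing in for CockroachDB's hlc.Timestamp; the names here are illustrative, not the real types.

package main

import (
	"fmt"
	"time"
)

// Timestamp is a simplified stand-in for hlc.Timestamp.
type Timestamp struct {
	WallTime int64 // nanoseconds since the Unix epoch
	Logical  int32 // breaks ties between events with the same WallTime
}

// makeTS mirrors the helper used by the benchmark above.
func makeTS(walltime int64, logical int32) Timestamp {
	return Timestamp{WallTime: walltime, Logical: logical}
}

func main() {
	// Equivalent to ts := makeTS(timeutil.Now().UnixNano(), 0) above;
	// timeutil.Now is effectively a wrapper around time.Now.
	ts := makeTS(time.Now().UnixNano(), 0)
	fmt.Printf("wall=%d logical=%d\n", ts.WallTime, ts.Logical)
}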
Example 2: Heartbeat
// Heartbeat is called to update a node's expiration timestamp. This
// method does a conditional put on the node liveness record, and if
// successful, stores the updated liveness record in the nodes map.
func (nl *NodeLiveness) Heartbeat(ctx context.Context, liveness *Liveness) error {
defer func(start time.Time) {
if dur := timeutil.Now().Sub(start); dur > time.Second {
log.Warningf(ctx, "slow heartbeat took %0.1fs", dur.Seconds())
}
}(timeutil.Now())
// Allow only one heartbeat at a time.
select {
case nl.heartbeatSem <- struct{}{}:
case <-ctx.Done():
return ctx.Err()
}
defer func() {
<-nl.heartbeatSem
}()
nodeID := nl.gossip.NodeID.Get()
var newLiveness Liveness
if liveness == nil {
newLiveness = Liveness{
NodeID: nodeID,
Epoch: 1,
}
} else {
newLiveness = *liveness
}
// We need to add the maximum clock offset to the expiration because it's
// used when determining liveness for a node.
newLiveness.Expiration = nl.clock.Now().Add(
(nl.livenessThreshold + nl.clock.MaxOffset()).Nanoseconds(), 0)
if err := nl.updateLiveness(ctx, &newLiveness, liveness, func(actual Liveness) error {
// Update liveness to actual value on mismatch.
nl.mu.Lock()
nl.mu.self = actual
nl.mu.Unlock()
// If the actual liveness is different than expected, but is
// considered live, treat the heartbeat as a success. This can
// happen when the periodic heartbeater races with a concurrent
// lease acquisition.
if actual.isLive(nl.clock.Now(), nl.clock.MaxOffset()) {
return errNodeAlreadyLive
}
// Otherwise, return error.
return errSkippedHeartbeat
}); err != nil {
if err == errNodeAlreadyLive {
return nil
}
nl.metrics.HeartbeatFailures.Inc(1)
return err
}
log.VEventf(ctx, 1, "heartbeat %+v", newLiveness.Expiration)
nl.mu.Lock()
nl.mu.self = newLiveness
nl.mu.Unlock()
nl.metrics.HeartbeatSuccesses.Inc(1)
return nil
}
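The deferred closure at the top of Heartbeat is a reusable idiom for flagging slow operations: the start time is captured as an argument when the defer statement executes, and the elapsed time is measured when the function returns. A self-contained sketch, with log.Printf and a toy workload standing in for CockroachDB's logging and the real heartbeat work:

package main

import (
	"log"
	"time"
)

func heartbeat() {
	// Capture the start time now; measure the duration on return.
	defer func(start time.Time) {
		if dur := time.Since(start); dur > time.Second {
			log.Printf("slow heartbeat took %0.1fs", dur.Seconds())
		}
	}(time.Now())
	time.Sleep(1200 * time.Millisecond) // stand-in for the real work
}

func main() { heartbeat() }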
Example 3: testGossipPeeringsInner
func testGossipPeeringsInner(
ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig,
) {
num := c.NumNodes()
deadline := timeutil.Now().Add(cfg.Duration)
waitTime := longWaitTime
if cfg.Duration < waitTime {
waitTime = shortWaitTime
}
for timeutil.Now().Before(deadline) {
CheckGossip(ctx, t, c, waitTime, HasPeers(num))
// Restart the first node.
log.Infof(ctx, "restarting node 0")
if err := c.Restart(ctx, 0); err != nil {
t.Fatal(err)
}
CheckGossip(ctx, t, c, waitTime, HasPeers(num))
// Restart another node (if there is one).
var pickedNode int
if num > 1 {
pickedNode = rand.Intn(num-1) + 1
}
log.Infof(ctx, "restarting node %d", pickedNode)
if err := c.Restart(ctx, pickedNode); err != nil {
t.Fatal(err)
}
CheckGossip(ctx, t, c, waitTime, HasPeers(num))
}
}
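The test is driven by a wall-clock deadline loop, a pattern that recurs throughout these examples: compute the deadline once, then iterate while Now().Before(deadline). A minimal sketch with a stubbed loop body:

package main

import (
	"fmt"
	"time"
)

func main() {
	deadline := time.Now().Add(200 * time.Millisecond)
	for i := 0; time.Now().Before(deadline); i++ {
		// Stand-in for the CheckGossip and node-restart steps above.
		fmt.Println("iteration", i)
		time.Sleep(50 * time.Millisecond)
	}
}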
Example 4: monitor
func (z *zeroSum) monitor(d time.Duration) {
start := timeutil.Now()
lastTime := start
var lastOps uint64
for ticks := 0; true; ticks++ {
time.Sleep(d)
if ticks%20 == 0 {
fmt.Printf("_elapsed__accounts_________ops__ops/sec___errors___splits____xfers___ranges_____________replicas\n")
}
now := timeutil.Now()
elapsed := now.Sub(lastTime).Seconds()
ops := atomic.LoadUint64(&z.stats.ops)
z.ranges.Lock()
ranges, replicas := z.ranges.count, z.ranges.replicas
z.ranges.Unlock()
fmt.Printf("%8s %9d %11d %8.1f %8d %8d %8d %8d %20s\n",
time.Duration(now.Sub(start).Seconds()+0.5)*time.Second,
z.accountsLen(), ops, float64(ops-lastOps)/elapsed,
atomic.LoadUint64(&z.stats.errors),
atomic.LoadUint64(&z.stats.splits),
atomic.LoadUint64(&z.stats.transfers),
ranges, z.formatReplicas(replicas))
lastTime = now
lastOps = ops
}
}
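monitor computes a rolling operations-per-second figure by sampling an atomic counter on each tick and dividing the delta by the elapsed wall time. A stripped-down sketch of that calculation, with a toy worker in place of zeroSum's load generators:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var ops uint64
	go func() { // toy load generator
		for {
			atomic.AddUint64(&ops, 1)
			time.Sleep(time.Millisecond)
		}
	}()
	lastTime := time.Now()
	var lastOps uint64
	for tick := 0; tick < 3; tick++ {
		time.Sleep(500 * time.Millisecond)
		now := time.Now()
		cur := atomic.LoadUint64(&ops)
		elapsed := now.Sub(lastTime).Seconds()
		fmt.Printf("%.1f ops/sec\n", float64(cur-lastOps)/elapsed)
		lastTime, lastOps = now, cur
	}
}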
Example 5: AfterTest
// AfterTest snapshots the currently-running goroutines and returns a
// function to be run at the end of tests to see whether any
// goroutines leaked.
func AfterTest(t testing.TB) func() {
orig := interestingGoroutines()
return func() {
if t.Failed() {
return
}
if r := recover(); r != nil {
panic(r)
}
// Loop, waiting for goroutines to shut down.
// Wait up to 5 seconds, but finish as quickly as possible.
deadline := timeutil.Now().Add(5 * time.Second)
for {
var leaked []string
for id, stack := range interestingGoroutines() {
if _, ok := orig[id]; !ok {
leaked = append(leaked, stack)
}
}
if len(leaked) == 0 {
return
}
if timeutil.Now().Before(deadline) {
time.Sleep(50 * time.Millisecond)
continue
}
sort.Strings(leaked)
for _, g := range leaked {
t.Errorf("Leaked goroutine: %v", g)
}
return
}
}
}
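Typical usage, assuming this helper is the one CockroachDB exports from its util/leaktest package (Example 14 below calls it that way): defer the returned closure so the goroutine snapshot is compared when the test finishes.

package storage

import (
	"testing"

	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
)

func TestNoGoroutineLeaks(t *testing.T) {
	defer leaktest.AfterTest(t)()
	// ... test body; any goroutine still running at the end is reported ...
}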
Example 6: insertLoad
// insertLoad adds a very basic load that inserts into a unique table and checks
// that the inserted values are indeed correct.
func insertLoad(t *testing.T, dc *dynamicClient, ID int) {
// Initialize the db.
if _, err := dc.exec(`CREATE DATABASE IF NOT EXISTS Insert`); err != nil {
t.Fatal(err)
}
tableName := fmt.Sprintf("Insert.Table%d", ID)
createTableStatement := fmt.Sprintf(`
CREATE TABLE %s (
key INT PRIMARY KEY,
value INT NOT NULL
)`, tableName)
insertStatement := fmt.Sprintf(`INSERT INTO %s (key, value) VALUES ($1, $1)`, tableName)
selectStatement := fmt.Sprintf(`SELECT key-value AS "total" FROM %s WHERE key = $1`, tableName)
// Init the db for the basic insert.
if _, err := dc.exec(createTableStatement); err != nil {
t.Fatal(err)
}
var valueCheck, valueInsert int
nextUpdate := timeutil.Now()
// Perform inserts and selects
for dc.isRunning() {
// Insert some values.
valueInsert++
if _, err := dc.exec(insertStatement, valueInsert); err != nil {
if err == errTestFinished {
return
}
t.Fatal(err)
}
// Check that another value is still correct.
valueCheck--
if valueCheck < 1 {
valueCheck = valueInsert
}
var total int
if err := dc.queryRowScan(selectStatement, []interface{}{valueCheck}, []interface{}{&total}); err != nil {
if err == errTestFinished {
return
}
t.Fatal(err)
}
if total != 0 {
t.Fatalf("total expected to be 0, is %d", total)
}
if timeutil.Now().After(nextUpdate) {
log.Infof(context.TODO(), "Insert %d: inserted and checked %d values", ID, valueInsert)
nextUpdate = timeutil.Now().Add(time.Second)
}
}
}
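The nextUpdate bookkeeping above keeps the loop's logging to at most once per second while the inserts run at full speed. The idiom in isolation, as a minimal sketch:

package main

import (
	"log"
	"time"
)

func main() {
	nextUpdate := time.Now()
	for i := 1; i <= 5000000; i++ {
		// ... one unit of work per iteration ...
		if time.Now().After(nextUpdate) {
			log.Printf("processed %d values", i)
			nextUpdate = time.Now().Add(time.Second)
		}
	}
}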
Example 7: Logs
// Logs returns the log entries parsed from the log files stored on
// the server. Log entries are returned in reverse chronological order. The
// following options are available:
// * "starttime" query parameter filters the log entries to only ones that
// occurred on or after the "starttime". Defaults to a day ago.
// * "endtime" query parameter filters the log entries to only ones that
// occurred before on on the "endtime". Defaults to the current time.
// * "pattern" query parameter filters the log entries by the provided regexp
// pattern if it exists. Defaults to nil.
// * "max" query parameter is the hard limit of the number of returned log
// entries. Defaults to defaultMaxLogEntries.
// * "level" query parameter filters the log entries to be those of the
// corresponding severity level or worse. Defaults to "info".
func (s *statusServer) Logs(
_ context.Context, req *serverpb.LogsRequest,
) (*serverpb.LogEntriesResponse, error) {
log.Flush()
var sev log.Severity
if len(req.Level) == 0 {
sev = log.Severity_INFO
} else {
var sevFound bool
sev, sevFound = log.SeverityByName(req.Level)
if !sevFound {
return nil, fmt.Errorf("level could not be determined: %s", req.Level)
}
}
startTimestamp, err := parseInt64WithDefault(
req.StartTime,
timeutil.Now().AddDate(0, 0, -1).UnixNano())
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "StartTime could not be parsed: %s", err)
}
endTimestamp, err := parseInt64WithDefault(req.EndTime, timeutil.Now().UnixNano())
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "EndTime could not be parsed: %s", err)
}
if startTimestamp > endTimestamp {
return nil, grpc.Errorf(codes.InvalidArgument, "StartTime: %d should not be greater than endtime: %d", startTimestamp, endTimestamp)
}
maxEntries, err := parseInt64WithDefault(req.Max, defaultMaxLogEntries)
if err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "Max could not be parsed: %s", err)
}
if maxEntries < 1 {
return nil, grpc.Errorf(codes.InvalidArgument, "Max: %d should be set to a value greater than 0", maxEntries)
}
var regex *regexp.Regexp
if len(req.Pattern) > 0 {
if regex, err = regexp.Compile(req.Pattern); err != nil {
return nil, grpc.Errorf(codes.InvalidArgument, "regex pattern could not be compiled: %s", err)
}
}
entries, err := log.FetchEntriesFromFiles(sev, startTimestamp, endTimestamp, int(maxEntries), regex)
if err != nil {
return nil, err
}
return &serverpb.LogEntriesResponse{Entries: entries}, nil
}
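The default time bounds above come straight from the wall clock: AddDate(0, 0, -1) gives "one day ago", and UnixNano converts both bounds into the integer nanosecond timestamps the log-fetching code expects. In isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	startTimestamp := time.Now().AddDate(0, 0, -1).UnixNano() // a day ago
	endTimestamp := time.Now().UnixNano()                     // now
	fmt.Printf("fetch log entries in [%d, %d]\n", startTimestamp, endTimestamp)
}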
Example 8: maybeRunPeriodicCheck
// If the time is greater than the timestamp stored at `key`, run `f`.
// Before running `f`, the timestamp is updated forward by a small amount via
// a compare-and-swap to ensure at-most-one concurrent execution. After `f`
// executes the timestamp is set to the next execution time.
// Returns how long until `f` should be run next (i.e. when this method should
// be called again).
func (s *Server) maybeRunPeriodicCheck(
op string, key roachpb.Key, f func(context.Context),
) time.Duration {
ctx, span := s.AnnotateCtxWithSpan(context.Background(), "op")
defer span.Finish()
// Add the op name to the log context.
ctx = log.WithLogTag(ctx, op, nil)
resp, err := s.db.Get(ctx, key)
if err != nil {
log.Infof(ctx, "error reading time: %s", err)
return updateCheckRetryFrequency
}
// We return early below if either the next check time is in the
// future or the atomic compare-and-swap of that time failed (which
// would happen if two nodes tried at the same time).
if resp.Exists() {
whenToCheck, pErr := resp.Value.GetTime()
if pErr != nil {
log.Warningf(ctx, "error decoding time: %s", err)
return updateCheckRetryFrequency
} else if delay := whenToCheck.Sub(timeutil.Now()); delay > 0 {
return delay
}
nextRetry := whenToCheck.Add(updateCheckRetryFrequency)
if err := s.db.CPut(ctx, key, nextRetry, whenToCheck); err != nil {
if log.V(2) {
log.Infof(ctx, "could not set next version check time (maybe another node checked?): %s", err)
}
return updateCheckRetryFrequency
}
} else {
log.Infof(ctx, "No previous %s time.", op)
nextRetry := timeutil.Now().Add(updateCheckRetryFrequency)
// CPut with `nil` prev value to assert that no other node has checked.
if err := s.db.CPut(ctx, key, nextRetry, nil); err != nil {
if log.V(2) {
log.Infof(ctx, "Could not set %s time (maybe another node checked?): %v", op, err)
}
return updateCheckRetryFrequency
}
}
f(ctx)
if err := s.db.Put(ctx, key, timeutil.Now().Add(updateCheckFrequency)); err != nil {
log.Infof(ctx, "Error updating %s time: %v", op, err)
}
return updateCheckFrequency
}
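The scheduling logic is easier to see with the KV plumbing removed. In the sketch below, an in-memory map guarded by a mutex stands in for the CPut-based compare-and-swap; the real code leans on the conditional put so that only one node in the cluster runs the check. All names here are illustrative:

package main

import (
	"fmt"
	"sync"
	"time"
)

const interval = time.Hour

var (
	mu      sync.Mutex
	nextRun = map[string]time.Time{}
)

func maybeRun(key string, f func()) time.Duration {
	mu.Lock()
	if when, ok := nextRun[key]; ok && time.Now().Before(when) {
		mu.Unlock()
		return time.Until(when) // not due yet; report the remaining delay
	}
	nextRun[key] = time.Now().Add(interval) // analogous to the CPut above
	mu.Unlock()
	f()
	return interval
}

func main() {
	fmt.Println(maybeRun("version-check", func() { fmt.Println("checked") }))
	fmt.Println(maybeRun("version-check", func() { fmt.Println("checked") }))
}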
Example 9: testPutInner
func testPutInner(ctx context.Context, t *testing.T, c cluster.Cluster, cfg cluster.TestConfig) {
db, err := c.NewClient(ctx, 0)
if err != nil {
t.Fatal(err)
}
errs := make(chan error, c.NumNodes())
start := timeutil.Now()
deadline := start.Add(cfg.Duration)
var count int64
for i := 0; i < c.NumNodes(); i++ {
go func() {
r, _ := randutil.NewPseudoRand()
value := randutil.RandBytes(r, 8192)
for timeutil.Now().Before(deadline) {
k := atomic.AddInt64(&count, 1)
v := value[:r.Intn(len(value))]
if err := db.Put(ctx, fmt.Sprintf("%08d", k), v); err != nil {
errs <- err
return
}
}
errs <- nil
}()
}
for i := 0; i < c.NumNodes(); {
baseCount := atomic.LoadInt64(&count)
select {
case <-stopper.ShouldStop():
t.Fatalf("interrupted")
case err := <-errs:
if err != nil {
t.Fatal(err)
}
i++
case <-time.After(1 * time.Second):
// Periodically print out progress so that we know the test is still
// running.
loadedCount := atomic.LoadInt64(&count)
log.Infof(ctx, "%d (%d/s)", loadedCount, loadedCount-baseCount)
c.Assert(ctx, t)
if err := cluster.Consistent(ctx, c, 0); err != nil {
t.Fatal(err)
}
}
}
elapsed := timeutil.Since(start)
log.Infof(ctx, "%d %.1f/sec", count, float64(count)/elapsed.Seconds())
}
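The surrounding structure is a fan-out/collect pattern: each worker sends exactly one value (an error or nil) on errs, the parent counts completions, and a time.After case keeps progress reporting alive in the meantime. A compact sketch of the skeleton, minus the cluster plumbing:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	const workers = 4
	deadline := time.Now().Add(300 * time.Millisecond)
	errs := make(chan error, workers)
	var count int64
	start := time.Now()
	for i := 0; i < workers; i++ {
		go func() {
			for time.Now().Before(deadline) {
				atomic.AddInt64(&count, 1) // stand-in for db.Put
			}
			errs <- nil // each worker reports exactly once
		}()
	}
	for done := 0; done < workers; {
		select {
		case err := <-errs:
			if err != nil {
				panic(err)
			}
			done++
		case <-time.After(100 * time.Millisecond):
			fmt.Println("progress:", atomic.LoadInt64(&count))
		}
	}
	elapsed := time.Since(start)
	fmt.Printf("%d ops, %.1f/sec\n", atomic.LoadInt64(&count),
		float64(atomic.LoadInt64(&count))/elapsed.Seconds())
}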
Example 10: waitClientsStop
// waitClientsStop waits until all clients have stopped.
func waitClientsStop(ctx context.Context, num int, state *testState, stallDuration time.Duration) {
prevRound := atomic.LoadUint64(&state.monkeyIteration)
stallTime := timeutil.Now().Add(stallDuration)
var prevOutput string
// Spin until all clients have shut down.
for numShutClients := 0; numShutClients < num; {
select {
case <-state.teardown:
case <-stopper.ShouldStop():
state.t.Fatal("interrupted")
case err := <-state.errChan:
if err != nil {
state.t.Error(err)
}
numShutClients++
case <-time.After(time.Second):
var newOutput string
if timeutil.Now().Before(state.deadline) {
curRound := atomic.LoadUint64(&state.monkeyIteration)
if curRound == prevRound {
if timeutil.Now().After(stallTime) {
atomic.StoreInt32(&state.stalled, 1)
state.t.Fatalf("Stall detected at round %d, no forward progress for %s", curRound, stallDuration)
}
} else {
prevRound = curRound
stallTime = timeutil.Now().Add(stallDuration)
}
// Periodically print out progress so that we know the test is
// still running and making progress.
counts := state.counts()
strCounts := make([]string, len(counts))
for i := range counts {
strCounts[i] = strconv.FormatUint(counts[i], 10)
}
newOutput = fmt.Sprintf("round %d: client counts: (%s)", curRound, strings.Join(strCounts, ", "))
} else {
newOutput = fmt.Sprintf("test finished, waiting for shutdown of %d clients", num-numShutClients)
}
// This just stops the logs from being a bit too spammy.
if newOutput != prevOutput {
log.Infof(ctx, "%s", newOutput)
prevOutput = newOutput
}
}
}
}
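The stall detector embedded above is worth isolating: if the shared round counter has not advanced within stallDuration, the test is declared stuck; any observed progress pushes the stall deadline forward again. A minimal sketch:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	const stallDuration = 300 * time.Millisecond
	var round uint64
	go func() { // stand-in for the monkey making progress, then stalling
		for i := 0; i < 3; i++ {
			time.Sleep(100 * time.Millisecond)
			atomic.AddUint64(&round, 1)
		}
	}()
	prevRound := atomic.LoadUint64(&round)
	stallTime := time.Now().Add(stallDuration)
	for {
		time.Sleep(50 * time.Millisecond)
		cur := atomic.LoadUint64(&round)
		if cur == prevRound {
			if time.Now().After(stallTime) {
				fmt.Printf("stall detected at round %d\n", cur)
				return
			}
			continue
		}
		prevRound, stallTime = cur, time.Now().Add(stallDuration)
	}
}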
Example 11: RetryForDuration
// RetryForDuration will retry the given function until it either returns
// without error, or the given duration has elapsed. The function is invoked
// immediately at first and then successively with an exponential backoff
// starting at 1ns and ending at the specified duration.
func RetryForDuration(duration time.Duration, fn func() error) error {
deadline := timeutil.Now().Add(duration)
var lastErr error
for wait := time.Duration(1); timeutil.Now().Before(deadline); wait *= 2 {
lastErr = fn()
if lastErr == nil {
return nil
}
if wait > time.Second {
wait = time.Second
}
time.Sleep(wait)
}
return lastErr
}
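Example usage, assuming RetryForDuration from above is in scope: poll a flaky condition for up to five seconds, with the wait doubling from 1ns and capped at one second as the code above specifies. The condition here is a toy stand-in.

package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	attempts := 0
	err := RetryForDuration(5*time.Second, func() error {
		attempts++
		if attempts < 4 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Printf("succeeded after %d attempts, err=%v\n", attempts, err)
}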
Example 12: newTemplate
// newTemplate returns a partially-filled template.
// It should be further populated based on whether the cert is for a CA or node.
func newTemplate(commonName string) (*x509.Certificate, error) {
// Generate a random serial number.
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, err
}
notBefore := timeutil.Now().Add(validFrom)
notAfter := notBefore.Add(validFor)
cert := &x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"Cockroach"},
CommonName: commonName,
},
NotBefore: notBefore,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
}
return cert, nil
}
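A sketch of how the returned template might be turned into a self-signed CA certificate with the standard library, assuming newTemplate from above is in scope; the IsCA and key-usage tweaks illustrate the "further populated" step the doc comment mentions, and are not CockroachDB's exact CA code.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"fmt"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	template, err := newTemplate("Cockroach CA")
	if err != nil {
		panic(err)
	}
	// Populate the CA-specific fields the doc comment alludes to.
	template.IsCA = true
	template.BasicConstraintsValid = true
	template.KeyUsage |= x509.KeyUsageCertSign
	// Self-signed: the template acts as its own parent.
	der, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("created %d-byte DER certificate\n", len(der))
}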
Example 13: waitAndProcess
// waitAndProcess waits for the pace interval and processes the replica
// if repl is not nil. The method returns true when the scanner needs
// to be stopped. The method also removes a replica from queues when it
// is signaled via the removed channel.
func (rs *replicaScanner) waitAndProcess(
ctx context.Context, start time.Time, clock *hlc.Clock, stopper *stop.Stopper, repl *Replica,
) bool {
waitInterval := rs.paceInterval(start, timeutil.Now())
rs.waitTimer.Reset(waitInterval)
if log.V(6) {
log.Infof(ctx, "wait timer interval set to %s", waitInterval)
}
for {
select {
case <-rs.waitTimer.C:
if log.V(6) {
log.Infof(ctx, "wait timer fired")
}
rs.waitTimer.Read = true
if repl == nil {
return false
}
if log.V(2) {
log.Infof(ctx, "replica scanner processing %s", repl)
}
for _, q := range rs.queues {
q.MaybeAdd(repl, clock.Now())
}
return false
case repl := <-rs.removed:
rs.removeReplica(repl)
case <-stopper.ShouldStop():
return true
}
}
}
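The select structure above is the standard Go idiom for a pace-able worker: reset a timer to the computed interval, then wait on it alongside removal and shutdown channels. A self-contained sketch with a plain time.Timer and toy channels in place of the scanner's:

package main

import (
	"fmt"
	"time"
)

func main() {
	timer := time.NewTimer(0)
	<-timer.C // drain the initial firing
	removed := make(chan string)
	stop := make(chan struct{})
	go func() { time.Sleep(175 * time.Millisecond); close(stop) }()
	for {
		timer.Reset(50 * time.Millisecond) // the pace interval
		select {
		case <-timer.C:
			fmt.Println("processing replica")
		case r := <-removed:
			fmt.Println("removed", r)
		case <-stop:
			return
		}
	}
}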
Example 14: TestScannerPaceInterval
// TestScannerPaceInterval tests that paceInterval returns the correct interval.
func TestScannerPaceInterval(t *testing.T) {
defer leaktest.AfterTest(t)()
const count = 3
durations := []time.Duration{
30 * time.Millisecond,
60 * time.Millisecond,
500 * time.Millisecond,
}
// logErrorWhenNotCloseTo logs an error when the actual value is not close
// to the expected value.
logErrorWhenNotCloseTo := func(expected, actual time.Duration) {
delta := 1 * time.Millisecond
if actual < expected-delta || actual > expected+delta {
t.Errorf("Expected duration %s, got %s", expected, actual)
}
}
for _, duration := range durations {
startTime := timeutil.Now()
ranges := newTestRangeSet(count, t)
s := newReplicaScanner(log.AmbientContext{}, duration, 0, ranges)
interval := s.paceInterval(startTime, startTime)
logErrorWhenNotCloseTo(duration/count, interval)
// The range set is empty
ranges = newTestRangeSet(0, t)
s = newReplicaScanner(log.AmbientContext{}, duration, 0, ranges)
interval = s.paceInterval(startTime, startTime)
logErrorWhenNotCloseTo(duration, interval)
ranges = newTestRangeSet(count, t)
s = newReplicaScanner(log.AmbientContext{}, duration, 0, ranges)
// Move the present forward by `duration` into the future.
interval = s.paceInterval(startTime, startTime.Add(duration))
logErrorWhenNotCloseTo(0, interval)
}
}
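The arithmetic the test pins down can be written out directly: with count replicas the scanner should finish a full pass in the target duration, so each step waits duration/count; an empty set waits the full duration; and once the target duration has already elapsed, no wait remains. A hypothetical reimplementation (the signature differs from the real paceInterval) matching the three assertions above:

package main

import (
	"fmt"
	"time"
)

// paceInterval is a hypothetical stand-in matching the behavior the test checks.
func paceInterval(total, elapsed time.Duration, count int) time.Duration {
	if count == 0 {
		return total
	}
	remaining := total - elapsed
	if remaining < 0 {
		remaining = 0
	}
	return remaining / time.Duration(count)
}

func main() {
	fmt.Println(paceInterval(30*time.Millisecond, 0, 3))                   // 10ms
	fmt.Println(paceInterval(30*time.Millisecond, 0, 0))                   // 30ms
	fmt.Println(paceInterval(30*time.Millisecond, 30*time.Millisecond, 3)) // 0s
}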
Example 15: Read
func (c *readTimeoutConn) Read(b []byte) (int, error) {
// readTimeout is the amount of time readTimeoutConn should wait on a
// read before checking for exit conditions. The tradeoff is between the
// time it takes to react to session context cancellation and the overhead
// of waking up and checking for exit conditions.
const readTimeout = 150 * time.Millisecond
// Remove the read deadline when returning from this function to avoid
// unexpected behavior.
defer func() { _ = c.SetReadDeadline(time.Time{}) }()
for {
if err := c.checkExitConds(); err != nil {
return 0, err
}
if err := c.SetReadDeadline(timeutil.Now().Add(readTimeout)); err != nil {
return 0, err
}
n, err := c.Conn.Read(b)
// Continue if the error is due to timing out.
if err, ok := err.(net.Error); ok && err.Timeout() {
continue
}
return n, err
}
}
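A self-contained variant of the same pattern, with a context standing in for checkExitConds: wrapping a net.Conn this way makes a blocking Read wake up every readTimeout to notice cancellation, at the cost of one deadline update per cycle. The package and type names here are illustrative.

package pgwire // illustrative package name

import (
	"context"
	"net"
	"time"
)

type cancelableConn struct {
	net.Conn
	ctx context.Context // stands in for checkExitConds above
}

func (c *cancelableConn) Read(b []byte) (int, error) {
	const readTimeout = 150 * time.Millisecond
	// Clear the deadline on return to avoid surprising later reads.
	defer func() { _ = c.SetReadDeadline(time.Time{}) }()
	for {
		if err := c.ctx.Err(); err != nil {
			return 0, err // session canceled or timed out
		}
		if err := c.SetReadDeadline(time.Now().Add(readTimeout)); err != nil {
			return 0, err
		}
		n, err := c.Conn.Read(b)
		if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
			continue // deadline fired; loop to re-check cancellation
		}
		return n, err
	}
}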