This article collects typical usage examples of the NewManualClock function from the Go package github.com/cockroachdb/cockroach/pkg/util/hlc. If you are wondering what NewManualClock does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following presents 15 code examples of the NewManualClock function, sorted by popularity by default.
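All of the examples share the same basic pattern: construct an hlc.ManualClock, feed its UnixNano method to hlc.NewClock, and advance time explicitly with Increment so the test is fully deterministic. The sketch below illustrates that pattern in isolation. The test name is hypothetical (it is not part of the CockroachDB source the examples are taken from), and the two-argument hlc.NewClock signature (physical clock function plus max offset) is assumed because most of the examples below use it; a few older snippets in this listing still use the one-argument form.

import (
    "testing"
    "time"

    "github.com/cockroachdb/cockroach/pkg/util/hlc"
)

// TestManualClockSketch is a hypothetical test used only to illustrate the
// ManualClock pattern shared by the examples below.
func TestManualClockSketch(t *testing.T) {
    manual := hlc.NewManualClock(123)                       // wall time starts at 123 ns
    clock := hlc.NewClock(manual.UnixNano, time.Nanosecond) // HLC driven by the manual source
    before := clock.Now()
    manual.Increment(50) // advance the fake wall clock by 50 ns
    after := clock.Now()
    if after.WallTime-before.WallTime < 50 {
        t.Errorf("expected the clock to observe the 50ns advance, got %d -> %d",
            before.WallTime, after.WallTime)
    }
}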
Example 1: TestTimestampSelectionInOptions
// TestTimestampSelectionInOptions verifies that a client can set the
// Txn timestamp using client.TxnExecOptions.
func TestTimestampSelectionInOptions(t *testing.T) {
    defer leaktest.AfterTest(t)()
    db := NewDB(newTestSender(nil, nil))
    txn := NewTxn(context.Background(), *db)
    mc := hlc.NewManualClock(100)
    clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
    execOpt := TxnExecOptions{
        Clock: clock,
    }
    refTimestamp := clock.Now()
    txnClosure := func(txn *Txn, opt *TxnExecOptions) error {
        // Ensure the KV transaction is created.
        return txn.Put("a", "b")
    }
    if err := txn.Exec(execOpt, txnClosure); err != nil {
        t.Fatal(err)
    }
    // Check the timestamp was initialized.
    if txn.Proto.OrigTimestamp.WallTime != refTimestamp.WallTime {
        t.Errorf("expected txn orig ts to be %s; got %s", refTimestamp, txn.Proto.OrigTimestamp)
    }
}
Example 2: TestClockOffsetMetrics
func TestClockOffsetMetrics(t *testing.T) {
    defer leaktest.AfterTest(t)()
    stopper := stop.NewStopper()
    defer stopper.Stop()
    clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 20*time.Nanosecond)
    monitor := newRemoteClockMonitor(clock, time.Hour)
    monitor.mu.offsets = map[string]RemoteOffset{
        "0": {
            Offset:      13,
            Uncertainty: 7,
            MeasuredAt:  6,
        },
    }
    if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
        t.Fatal(err)
    }
    if a, e := monitor.Metrics().ClockOffsetMeanNanos.Value(), int64(13); a != e {
        t.Errorf("mean %d != expected %d", a, e)
    }
    if a, e := monitor.Metrics().ClockOffsetStdDevNanos.Value(), int64(7); a != e {
        t.Errorf("stdDev %d != expected %d", a, e)
    }
}
Example 3: TestReacquireLease
func TestReacquireLease(t *testing.T) {
    defer leaktest.AfterTest(t)()
    s, db := setup(t)
    defer s.Stopper().Stop()
    ctx := context.Background()
    manual := hlc.NewManualClock(123)
    clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
    lm := client.NewLeaseManager(db, clock, client.LeaseManagerOptions{ClientID: clientID1})
    l, err := lm.AcquireLease(ctx, leaseKey)
    if err != nil {
        t.Fatal(err)
    }
    // We allow re-acquiring the same lease as long as the client ID is
    // the same to allow a client to reacquire its own leases rather than
    // having to wait them out if it crashes and restarts.
    l, err = lm.AcquireLease(ctx, leaseKey)
    if err != nil {
        t.Fatal(err)
    }
    if err := lm.ReleaseLease(ctx, l); err != nil {
        t.Fatal(err)
    }
}
Example 4: TestTimestampCacheNoEviction
// TestTimestampCacheNoEviction verifies that even after
// the MinTSCacheWindow interval, if the cache has not hit
// its size threshold, it will not evict entries.
func TestTimestampCacheNoEviction(t *testing.T) {
    defer leaktest.AfterTest(t)()
    manual := hlc.NewManualClock(123)
    clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
    tc := newTimestampCache(clock)
    // Increment time to the low water mark + 1.
    manual.Increment(1)
    aTS := clock.Now()
    tc.add(roachpb.Key("a"), nil, aTS, nil, true)
    tc.AddRequest(cacheRequest{
        reads:     []roachpb.Span{{Key: roachpb.Key("c")}},
        timestamp: aTS,
    })
    // Increment time by the MinTSCacheWindow and add another key.
    manual.Increment(MinTSCacheWindow.Nanoseconds())
    tc.add(roachpb.Key("b"), nil, clock.Now(), nil, true)
    tc.AddRequest(cacheRequest{
        reads:     []roachpb.Span{{Key: roachpb.Key("d")}},
        timestamp: clock.Now(),
    })
    // Verify that the cache still has 4 entries in it.
    if l, want := tc.len(), 4; l != want {
        t.Errorf("expected %d entries to remain, got %d", want, l)
    }
}
Example 5: TestTimestampCacheClear
func TestTimestampCacheClear(t *testing.T) {
    defer leaktest.AfterTest(t)()
    manual := hlc.NewManualClock(123)
    clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
    tc := newTimestampCache(clock)
    key := roachpb.Key("a")
    ts := clock.Now()
    tc.add(key, nil, ts, nil, true)
    manual.Increment(5000000)
    expTS := clock.Now()
    // Clear the cache, which will reset the low water mark to
    // the current time.
    tc.Clear(expTS)
    // Fetching any keys should give current time.
    if rTS, _, ok := tc.GetMaxRead(key, nil); ok {
        t.Errorf("expected %s to have cleared timestamp", key)
    } else if !rTS.Equal(expTS) {
        t.Errorf("expected %s, got %s", expTS, rTS)
    }
}
Example 6: TestStoreRangeMergeStats
// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
    defer leaktest.AfterTest(t)()
    manual := hlc.NewManualClock(123)
    storeCfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
    storeCfg.TestingKnobs.DisableSplitQueue = true
    store, stopper := createTestStoreWithConfig(t, storeCfg)
    defer stopper.Stop()
    // Split the range.
    aDesc, bDesc, pErr := createSplitRanges(store)
    if pErr != nil {
        t.Fatal(pErr)
    }
    // Write some values left and right of the proposed split key.
    writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
    writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))
    // Get the range stats for both ranges now that we have data.
    snap := store.Engine().NewSnapshot()
    defer snap.Close()
    msA, err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID)
    if err != nil {
        t.Fatal(err)
    }
    msB, err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID)
    if err != nil {
        t.Fatal(err)
    }
    // Stats should agree with recomputation.
    if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
        t.Fatalf("failed to verify range A's stats before merge: %v", err)
    }
    if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
        t.Fatalf("failed to verify range B's stats before merge: %v", err)
    }
    manual.Increment(100)
    // Merge the b range back into the a range.
    args := adminMergeArgs(roachpb.KeyMin)
    if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
        t.Fatal(err)
    }
    replMerged := store.LookupReplica(aDesc.StartKey, nil)
    // Get the range stats for the merged range and verify.
    snap = store.Engine().NewSnapshot()
    defer snap.Close()
    msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, replMerged.RangeID)
    if err != nil {
        t.Fatal(err)
    }
    // Merged stats should agree with recomputation.
    if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
        t.Errorf("failed to verify range's stats after merge: %v", err)
    }
}
Example 7: TestVerifyClockOffset
func TestVerifyClockOffset(t *testing.T) {
    defer leaktest.AfterTest(t)()
    clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 50*time.Nanosecond)
    monitor := newRemoteClockMonitor(clock, time.Hour)
    for idx, tc := range []struct {
        offsets       []RemoteOffset
        expectedError bool
    }{
        // no error if no offsets.
        {[]RemoteOffset{}, false},
        // no error when a majority of offsets are under the maximum tolerated offset.
        {[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 48, Uncertainty: 20}, {Offset: 61, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, false},
        // error when less than a majority of offsets are under the maximum tolerated offset.
        {[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 58, Uncertainty: 20}, {Offset: 85, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, true},
    } {
        monitor.mu.offsets = make(map[string]RemoteOffset)
        for i, offset := range tc.offsets {
            monitor.mu.offsets[strconv.Itoa(i)] = offset
        }
        if tc.expectedError {
            if err := monitor.VerifyClockOffset(context.TODO()); !testutils.IsError(err, errOffsetGreaterThanMaxOffset) {
                t.Errorf("%d: unexpected error %v", idx, err)
            }
        } else {
            if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
                t.Errorf("%d: unexpected error %s", idx, err)
            }
        }
    }
}
Example 8: TestHeartbeatReply
func TestHeartbeatReply(t *testing.T) {
    defer leaktest.AfterTest(t)()
    manual := hlc.NewManualClock(5)
    clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
    heartbeat := &HeartbeatService{
        clock:              clock,
        remoteClockMonitor: newRemoteClockMonitor(clock, time.Hour),
    }
    request := &PingRequest{
        Ping: "testPing",
    }
    response, err := heartbeat.Ping(context.Background(), request)
    if err != nil {
        t.Fatal(err)
    }
    if response.Pong != request.Ping {
        t.Errorf("expected %s to be equal to %s", response.Pong, request.Ping)
    }
    if response.ServerTime != 5 {
        t.Errorf("expected server time 5, instead %d", response.ServerTime)
    }
}
Example 9: TestScannerTiming
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
func TestScannerTiming(t *testing.T) {
    defer leaktest.AfterTest(t)()
    const count = 3
    const runTime = 100 * time.Millisecond
    const maxError = 7500 * time.Microsecond
    durations := []time.Duration{
        15 * time.Millisecond,
        25 * time.Millisecond,
    }
    for i, duration := range durations {
        testutils.SucceedsSoon(t, func() error {
            ranges := newTestRangeSet(count, t)
            q := &testQueue{}
            s := newReplicaScanner(log.AmbientContext{}, duration, 0, ranges)
            s.AddQueues(q)
            mc := hlc.NewManualClock(123)
            clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
            stopper := stop.NewStopper()
            s.Start(clock, stopper)
            time.Sleep(runTime)
            stopper.Stop()
            avg := s.avgScan()
            log.Infof(context.Background(), "%d: average scan: %s", i, avg)
            if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
                duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
                return errors.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
            }
            return nil
        })
    }
}
Example 10: TestAcquireAndRelease
func TestAcquireAndRelease(t *testing.T) {
    defer leaktest.AfterTest(t)()
    s, db := setup(t)
    defer s.Stopper().Stop()
    ctx := context.Background()
    manual := hlc.NewManualClock(123)
    clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
    lm := client.NewLeaseManager(db, clock, client.LeaseManagerOptions{ClientID: clientID1})
    l, err := lm.AcquireLease(ctx, leaseKey)
    if err != nil {
        t.Fatal(err)
    }
    if err := lm.ReleaseLease(ctx, l); err != nil {
        t.Fatal(err)
    }
    if err := lm.ReleaseLease(ctx, l); !testutils.IsError(err, "unexpected value") {
        t.Fatal(err)
    }
    l, err = lm.AcquireLease(ctx, leaseKey)
    if err != nil {
        t.Fatal(err)
    }
    if err := lm.ReleaseLease(ctx, l); err != nil {
        t.Fatal(err)
    }
}
Example 11: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
    defer leaktest.AfterTest(t)()
    stopper := stop.NewStopper()
    manual := hlc.NewManualClock(123)
    clock := hlc.NewClock(manual.UnixNano, 20*time.Nanosecond)
    senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
        br := ba.CreateReply()
        txnClone := ba.Txn.Clone()
        br.Txn = &txnClone
        br.Txn.Writing = true
        return br, nil
    }
    ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
    ts := NewTxnCoordSender(
        ambient, senderFn(senderFunc), clock, false, stopper, MakeTxnMetrics(metric.TestSampleInterval),
    )
    // Stop the stopper manually, prior to trying the transaction. This has the
    // effect of returning a NodeUnavailableError for any attempts at launching
    // a heartbeat goroutine.
    stopper.Stop()
    var ba roachpb.BatchRequest
    key := roachpb.Key("test")
    ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
    ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
    ba.Add(&roachpb.EndTransactionRequest{})
    ba.Txn = &roachpb.Transaction{Name: "test"}
    _, pErr := ts.Send(context.Background(), ba)
    if pErr != nil {
        t.Fatal(pErr)
    }
}
Example 12: TestTimestampCacheEqualTimestamps
// TestTimestampCacheEqualTimestamps verifies that in the event of two
// non-overlapping transactions with equal timestamps, the returned
// timestamp is not owned by either one.
func TestTimestampCacheEqualTimestamps(t *testing.T) {
    defer leaktest.AfterTest(t)()
    manual := hlc.NewManualClock(123)
    clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
    tc := newTimestampCache(clock)
    txn1 := uuid.MakeV4()
    txn2 := uuid.MakeV4()
    // Add two non-overlapping transactions at the same timestamp.
    ts1 := clock.Now()
    tc.add(roachpb.Key("a"), roachpb.Key("b"), ts1, &txn1, true)
    tc.add(roachpb.Key("b"), roachpb.Key("c"), ts1, &txn2, true)
    // When querying either side separately, the transaction ID is returned.
    if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("b")); !ts.Equal(ts1) {
        t.Errorf("expected 'a'-'b' to have timestamp %s, but found %s", ts1, ts)
    } else if *txn != txn1 {
        t.Errorf("expected 'a'-'b' to have txn id %s, but found %s", txn1, txn)
    }
    if ts, txn, _ := tc.GetMaxRead(roachpb.Key("b"), roachpb.Key("c")); !ts.Equal(ts1) {
        t.Errorf("expected 'b'-'c' to have timestamp %s, but found %s", ts1, ts)
    } else if *txn != txn2 {
        t.Errorf("expected 'b'-'c' to have txn id %s, but found %s", txn2, txn)
    }
    // Querying a span that overlaps both returns a nil txn ID; neither
    // can proceed here.
    if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("c")); !ts.Equal(ts1) {
        t.Errorf("expected 'a'-'c' to have timestamp %s, but found %s", ts1, ts)
    } else if txn != nil {
        t.Errorf("expected 'a'-'c' to have nil txn id, but found %s", txn)
    }
}
Example 13: TestTxnCoordSenderErrorWithIntent
// TestTxnCoordSenderErrorWithIntent validates that if a transactional request
// returns an error but also indicates a Writing transaction, the coordinator
// tracks it just like a successful request.
func TestTxnCoordSenderErrorWithIntent(t *testing.T) {
    defer leaktest.AfterTest(t)()
    stopper := stop.NewStopper()
    defer stopper.Stop()
    manual := hlc.NewManualClock(0)
    clock := hlc.NewClock(manual.UnixNano)
    clock.SetMaxOffset(20)
    testCases := []struct {
        roachpb.Error
        errMsg string
    }{
        {*roachpb.NewError(roachpb.NewTransactionRetryError()), "retry txn"},
        {*roachpb.NewError(roachpb.NewTransactionPushError(roachpb.Transaction{
            TxnMeta: enginepb.TxnMeta{
                ID: uuid.NewV4(),
            }})), "failed to push"},
        {*roachpb.NewErrorf("testError"), "testError"},
    }
    for i, test := range testCases {
        func() {
            senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
                txn := ba.Txn.Clone()
                txn.Writing = true
                pErr := &roachpb.Error{}
                *pErr = test.Error
                pErr.SetTxn(&txn)
                return nil, pErr
            }
            ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
            ts := NewTxnCoordSender(
                ambient,
                senderFn(senderFunc),
                clock,
                false,
                stopper,
                MakeTxnMetrics(metric.TestSampleInterval),
            )
            var ba roachpb.BatchRequest
            key := roachpb.Key("test")
            ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
            ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
            ba.Add(&roachpb.EndTransactionRequest{})
            ba.Txn = &roachpb.Transaction{Name: "test"}
            _, pErr := ts.Send(context.Background(), ba)
            if !testutils.IsPError(pErr, test.errMsg) {
                t.Errorf("%d: error did not match %s: %v", i, test.errMsg, pErr)
            }
            defer teardownHeartbeats(ts)
            ts.Lock()
            defer ts.Unlock()
            if len(ts.txns) != 1 {
                t.Errorf("%d: expected transaction to be tracked", i)
            }
        }()
    }
}
Example 14: TestLeasesMultipleClients
func TestLeasesMultipleClients(t *testing.T) {
    defer leaktest.AfterTest(t)()
    s, db := setup(t)
    defer s.Stopper().Stop()
    ctx := context.Background()
    manual1 := hlc.NewManualClock(123)
    clock1 := hlc.NewClock(manual1.UnixNano, time.Nanosecond)
    manual2 := hlc.NewManualClock(123)
    clock2 := hlc.NewClock(manual2.UnixNano, time.Nanosecond)
    lm1 := client.NewLeaseManager(db, clock1, client.LeaseManagerOptions{ClientID: clientID1})
    lm2 := client.NewLeaseManager(db, clock2, client.LeaseManagerOptions{ClientID: clientID2})
    l1, err := lm1.AcquireLease(ctx, leaseKey)
    if err != nil {
        t.Fatal(err)
    }
    _, err = lm2.AcquireLease(ctx, leaseKey)
    if !testutils.IsError(err, "is not available until") {
        t.Fatalf("didn't get expected error trying to acquire already held lease: %v", err)
    }
    if _, ok := err.(*client.LeaseNotAvailableError); !ok {
        t.Fatalf("expected LeaseNotAvailableError, got %v", err)
    }
    // Ensure a lease can be "stolen" after it's expired.
    manual2.Increment(int64(client.DefaultLeaseDuration) + 1)
    l2, err := lm2.AcquireLease(ctx, leaseKey)
    if err != nil {
        t.Fatal(err)
    }
    // lm1's clock indicates that its lease should still be valid, but it doesn't
    // own it anymore.
    manual1.Increment(int64(client.DefaultLeaseDuration) / 2)
    if err := lm1.ExtendLease(ctx, l1); !testutils.IsError(err, "out of sync with DB state") {
        t.Fatalf("didn't get expected error trying to extend expired lease: %v", err)
    }
    if err := lm1.ReleaseLease(ctx, l1); !testutils.IsError(err, "unexpected value") {
        t.Fatalf("didn't get expected error trying to release stolen lease: %v", err)
    }
    if err := lm2.ReleaseLease(ctx, l2); err != nil {
        t.Fatal(err)
    }
}
Example 15: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to a maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shut down the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester, baseCtx *base.Config, initSender InitSenderFn) {
    ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
    nc := &base.NodeIDContainer{}
    ambient.AddLogTag("n", nc)
    nodeID := roachpb.NodeID(1)
    nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
    ltc.tester = t
    ltc.Manual = hlc.NewManualClock(0)
    ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
    ltc.Stopper = stop.NewStopper()
    rpcContext := rpc.NewContext(ambient, baseCtx, ltc.Clock, ltc.Stopper)
    server := rpc.NewServer(rpcContext) // never started
    ltc.Gossip = gossip.New(ambient, nc, rpcContext, server, nil, ltc.Stopper, metric.NewRegistry())
    ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20)
    ltc.Stopper.AddCloser(ltc.Eng)
    ltc.Stores = storage.NewStores(ambient, ltc.Clock)
    ltc.Sender = initSender(nodeDesc, ambient.Tracer, ltc.Clock, ltc.Latency, ltc.Stores, ltc.Stopper,
        ltc.Gossip)
    if ltc.DBContext == nil {
        dbCtx := client.DefaultDBContext()
        ltc.DBContext = &dbCtx
    }
    ltc.DB = client.NewDBWithContext(ltc.Sender, *ltc.DBContext)
    transport := storage.NewDummyRaftTransport()
    cfg := storage.TestStoreConfig()
    if ltc.RangeRetryOptions != nil {
        cfg.RangeRetryOptions = *ltc.RangeRetryOptions
    }
    cfg.AmbientCtx = ambient
    cfg.Clock = ltc.Clock
    cfg.DB = ltc.DB
    cfg.Gossip = ltc.Gossip
    cfg.Transport = transport
    cfg.MetricsSampleInterval = metric.TestSampleInterval
    ltc.Store = storage.NewStore(cfg, ltc.Eng, nodeDesc)
    if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}); err != nil {
        t.Fatalf("unable to start local test cluster: %s", err)
    }
    ltc.Stores.AddStore(ltc.Store)
    if err := ltc.Store.BootstrapRange(nil); err != nil {
        t.Fatalf("unable to start local test cluster: %s", err)
    }
    if err := ltc.Store.Start(context.Background(), ltc.Stopper); err != nil {
        t.Fatalf("unable to start local test cluster: %s", err)
    }
    nc.Set(context.TODO(), nodeDesc.NodeID)
    if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
        t.Fatalf("unable to set node descriptor: %s", err)
    }
}