This page collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/util/hlc.NewManualClock. If you are wondering what NewManualClock does, how to call it, or what real code that uses it looks like, the curated examples below should help.
Fifteen code examples of the NewManualClock function are shown below, sorted by popularity by default. You can upvote examples you like or find useful; your votes help the system recommend better Golang code samples.
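Before diving into the examples, here is the core pattern nearly all of them share: hlc.NewManualClock creates a clock whose "physical" time is set explicitly by the test instead of being read from the OS, and hlc.NewClock wraps it into a hybrid logical clock. The sketch below is illustrative rather than taken from CockroachDB itself; the import path matches the examples on this page, but the exact ManualClock API has varied across CockroachDB versions, so treat it as a minimal sketch under those assumptions.

package main

import (
	"fmt"
	"time"

	"github.com/cockroachdb/cockroach/util/hlc"
)

func main() {
	// The manual clock starts at an explicit wall time (in nanoseconds).
	manual := hlc.NewManualClock(0)
	// The hybrid logical clock reads its physical component from the
	// manual clock, which makes test timestamps fully deterministic.
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(100 * time.Millisecond)

	before := clock.Now()
	manual.Increment(int64(50 * time.Millisecond)) // advance "physical" time by 50ms
	after := clock.Now()
	fmt.Println(before, after) // after's wall time is 50ms later
}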
Example 1: TestMultiRangeScanReverseScanInconsistent
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()
	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if err := db.Run(b); err != nil {
			t.Fatal(err)
		}
		ts = append(ts, b.Results[0].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[0].Rows[0].Timestamp())
	}
	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to just before the second
	// key's write timestamp, so the scan sees only key "a".
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())
	// Scan.
	sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
	reply, err := client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
	// ReverseScan.
	rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
	reply, err = client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*roachpb.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
Example 2: TestRejectFutureCommand
// TestRejectFutureCommand verifies that lease holders reject commands that
// would cause a large time jump.
func TestRejectFutureCommand(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const maxOffset = 100 * time.Millisecond
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(maxOffset)
	mtc := multiTestContext{
		clock: clock,
	}
	mtc.Start(t, 1)
	defer mtc.Stop()
	// First do a write. The first write will advance the clock by MaxOffset
	// because of the read cache's low water mark.
	putReq := putArgs([]byte("b"), []byte("b"))
	if _, err := client.SendWrapped(rg1(mtc.stores[0]), nil, &putReq); err != nil {
		t.Fatal(err)
	}
	if now := clock.Now(); now.WallTime != int64(maxOffset) {
		t.Fatalf("expected clock to advance to 100ms; got %s", now)
	}
	// The logical clock has advanced past the physical clock; increment
	// the "physical" clock to catch up.
	manual.Increment(int64(maxOffset))
	startTime := manual.UnixNano()
	// Commands with a future timestamp that is within the MaxOffset
	// bound will be accepted and will cause the clock to advance.
	for i := int64(0); i < 3; i++ {
		incArgs := incrementArgs([]byte("a"), 5)
		ts := hlc.ZeroTimestamp.Add(startTime+((i+1)*30)*int64(time.Millisecond), 0)
		if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err != nil {
			t.Fatal(err)
		}
	}
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Fatalf("expected clock to advance to 190ms; got %s", now)
	}
	// Once the accumulated offset reaches MaxOffset, commands will be rejected.
	incArgs := incrementArgs([]byte("a"), 11)
	ts := hlc.ZeroTimestamp.Add(startTime+int64(maxOffset)+1, 0) // 1ns past the MaxOffset bound
	if _, err := client.SendWrappedWith(rg1(mtc.stores[0]), nil, roachpb.Header{Timestamp: ts}, &incArgs); err == nil {
		t.Fatalf("expected clock offset error but got nil")
	}
	// The clock remained at 190ms and the final command was not executed.
	if now := clock.Now(); now.WallTime != int64(190*time.Millisecond) {
		t.Errorf("expected clock to advance to 190ms; got %s", now)
	}
	val, _, err := engine.MVCCGet(context.Background(), mtc.engines[0], roachpb.Key("a"), clock.Now(), true, nil)
	if err != nil {
		t.Fatal(err)
	}
	if v := mustGetInt(val); v != 15 {
		t.Errorf("expected 15, got %v", v)
	}
}
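The rejection bound exercised above is worth isolating: a command timestamp is accepted only while its wall time is within MaxOffset of the node's physical clock. The following is a minimal, self-contained sketch of that comparison, not CockroachDB's actual implementation (the real check lives in the hlc package, also carries a logical component, and reports an error rather than a bool):

package main

import (
	"fmt"
	"time"
)

// acceptable reports whether a command whose timestamp has wall time
// cmdWall (nanoseconds) would be admitted by a node whose physical
// clock currently reads physNow (nanoseconds).
func acceptable(cmdWall, physNow int64, maxOffset time.Duration) bool {
	return cmdWall-physNow <= int64(maxOffset)
}

func main() {
	const maxOffset = 100 * time.Millisecond
	physNow := int64(100 * time.Millisecond) // the manual clock sits at 100ms
	fmt.Println(acceptable(physNow+int64(maxOffset), physNow, maxOffset))   // true: exactly at the bound
	fmt.Println(acceptable(physNow+int64(maxOffset)+1, physNow, maxOffset)) // false: 1ns past it
}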
Example 3: TestScannerAddToQueues
// TestScannerAddToQueues verifies that ranges are added to and
// removed from multiple queues.
func TestScannerAddToQueues(t *testing.T) {
	const count = 3
	iter := newTestIterator(count)
	q1, q2 := &testQueue{}, &testQueue{}
	s := newRangeScanner(1*time.Millisecond, iter)
	s.AddQueues(q1, q2)
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	stopper := util.NewStopper(0)
	// Start the scanner and verify that all ranges are added to both queues.
	s.Start(clock, stopper)
	if err := util.IsTrueWithin(func() bool {
		return q1.count() == count && q2.count() == count
	}, 50*time.Millisecond); err != nil {
		t.Error(err)
	}
	// Remove the first range and verify it no longer exists in either queue.
	rng := iter.remove(0)
	s.RemoveRange(rng)
	if err := util.IsTrueWithin(func() bool {
		return q1.count() == count-1 && q2.count() == count-1
	}, 10*time.Millisecond); err != nil {
		t.Error(err)
	}
	// Stop scanner and verify both queues are stopped.
	stopper.Stop()
	if !q1.isDone() || !q2.isDone() {
		t.Errorf("expected all queues to stop; got %t, %t", q1.isDone(), q2.isDone())
	}
}
Example 4: createTestStoreWithoutStart
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	// Setup fake zone config handler.
	config.TestingSetupZoneConfigHook(stopper)
	rpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	ctx.StorePool = NewStorePool(ctx.Gossip, TestTimeUntilStoreDeadOff, stopper)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(roachpb.Attributes{}, 10<<20, stopper)
	ctx.Transport = multiraft.NewLocalRPCTransport(stopper)
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	ctx.DB = client.NewDB(sender)
	store := NewStore(ctx, eng, &roachpb.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(roachpb.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(nil); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
Example 5: TestScannerTiming
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
//
// TODO(spencer): in order to make this test not take too much time,
// we're running these loops at speeds where clock ticks may be
// an issue on virtual machines used for continuous integration.
func TestScannerTiming(t *testing.T) {
	const count = 3
	const runTime = 50 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	durations := []time.Duration{
		5 * time.Millisecond,
		12500 * time.Microsecond,
	}
	for i, duration := range durations {
		iter := newTestIterator(count)
		q := &testQueue{}
		s := newRangeScanner(duration, iter)
		s.AddQueues(q)
		mc := hlc.NewManualClock(0)
		clock := hlc.NewClock(mc.UnixNano)
		stopper := util.NewStopper(0)
		defer stopper.Stop()
		s.Start(clock, stopper)
		time.Sleep(runTime)
		avg := iter.avgScan()
		log.Infof("%d: average scan: %s\n", i, avg)
		if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
			duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
			t.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
		}
	}
}
Example 6: createTestBookie
// createTestBookie creates a new bookie, stopper and manual clock for testing.
func createTestBookie(reservationTimeout time.Duration) (*stop.Stopper, *hlc.ManualClock, *bookie) {
	stopper := stop.NewStopper()
	mc := hlc.NewManualClock(0)
	clock := hlc.NewClock(mc.UnixNano)
	b := newBookie(clock, reservationTimeout, stopper)
	return stopper, mc, b
}
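A hypothetical caller of this helper might look like the sketch below. The reservation behavior is an assumption for illustration only (the bookie API is not shown on this page); what the sketch reliably demonstrates is the usual pairing of the returned stopper and manual clock in a test:

func TestBookieExpiration(t *testing.T) {
	stopper, mc, b := createTestBookie(5 * time.Second)
	defer stopper.Stop()
	// Advance the manual clock past the reservation timeout so that any
	// reservation booked through b would now be considered expired.
	mc.Increment(int64(6 * time.Second))
	_ = b // reservations would be made and verified through b here
}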
Example 7: createTestStoreWithoutStart
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcContext := rpc.NewContext(rootTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	ctx := TestStoreContext
	ctx.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	manual := hlc.NewManualClock(0)
	ctx.Clock = hlc.NewClock(manual.UnixNano)
	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	ctx.Transport = multiraft.NewLocalRPCTransport()
	stopper.AddCloser(ctx.Transport)
	sender := &testSender{}
	var err error
	if ctx.DB, err = client.Open("//root@", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	store := NewStore(ctx, eng, &proto.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	return store, manual, stopper
}
Example 8: Start
func (m *multiTestContext) Start(t *testing.T, numStores int) {
	if m.manualClock == nil {
		m.manualClock = hlc.NewManualClock(0)
	}
	if m.clock == nil {
		m.clock = hlc.NewClock(m.manualClock.UnixNano)
	}
	if m.gossip == nil {
		rpcContext := rpc.NewContext(m.clock, rpc.LoadInsecureTLSConfig())
		m.gossip = gossip.New(rpcContext, gossip.TestInterval, "")
	}
	if m.transport == nil {
		m.transport = multiraft.NewLocalRPCTransport()
	}
	if m.sender == nil {
		m.sender = kv.NewLocalSender()
	}
	if m.db == nil {
		txnSender := kv.NewTxnCoordSender(m.sender, m.clock, false)
		m.db = client.NewKV(txnSender, nil)
		m.db.User = storage.UserRoot
	}
	for i := 0; i < numStores; i++ {
		m.addStore(t)
	}
}
Example 9: TestTimestampCacheReadVsWrite
// TestTimestampCacheReadVsWrite verifies that the timestamp cache
// can differentiate between read and write timestamps.
func TestTimestampCacheReadVsWrite(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	tc := NewTimestampCache(clock)
	// Add read-only non-txn entry at current time.
	ts1 := clock.Now()
	tc.Add(roachpb.Key("a"), roachpb.Key("b"), ts1, nil, true)
	// Add two successive txn entries; one read-only and one read-write.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts2 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts2, txn1ID, true)
	ts3 := clock.Now()
	tc.Add(roachpb.Key("a"), nil, ts3, txn2ID, false)
	// Fetching with no transaction gets latest values.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, nil); !rTS.Equal(ts2) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts2, ts3, rTS, wTS)
	}
	// Fetching with txn ID "1" gets original for read and most recent for write.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, txn1ID); !rTS.Equal(ts1) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts1, ts3, rTS, wTS)
	}
	// Fetching with txn ID "2" gets ts2 for read and low water mark for write.
	if rTS, wTS := tc.GetMax(roachpb.Key("a"), nil, txn2ID); !rTS.Equal(ts2) || !wTS.Equal(tc.lowWater) {
		t.Errorf("expected %s %s; got %s %s", ts2, tc.lowWater, rTS, wTS)
	}
}
Example 10: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(20)
	ts := NewTxnCoordSender(senderFn(func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		return ba.CreateReply().(*proto.BatchResponse), nil
	}), clock, false, nil, stopper)
	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()
	var ba proto.BatchRequest
	put := &proto.PutRequest{}
	put.Key = proto.Key("test")
	ba.Add(put)
	ba.Add(&proto.EndTransactionRequest{})
	ba.Txn = &proto.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example 11: TestClockOffsetMetrics
func TestClockOffsetMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	t.Skip()
	stopper := stop.NewStopper()
	defer stopper.Stop()
	// Create a RemoteClockMonitor with a hand-picked offset.
	offset := RemoteOffset{
		Offset:      13,
		Uncertainty: 7,
		MeasuredAt:  6,
	}
	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano)
	clock.SetMaxOffset(20 * time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)
	monitor.mu.offsets = map[string]RemoteOffset{
		"0": offset,
	}
	if err := monitor.VerifyClockOffset(); err != nil {
		t.Fatal(err)
	}
	reg := monitor.Registry()
	expLower := offset.Offset - offset.Uncertainty
	if a, e := reg.GetGauge("lower-bound-nanos").Value(), expLower; a != e {
		t.Errorf("lower bound %d != expected %d", a, e)
	}
	expHigher := offset.Offset + offset.Uncertainty
	if a, e := reg.GetGauge("upper-bound-nanos").Value(), expHigher; a != e {
		t.Errorf("upper bound %d != expected %d", a, e)
	}
}
Example 12: TestTimestampCacheWithTxnID
// TestTimestampCacheWithTxnID verifies that timestamps matching
// the specified txn ID are ignored.
func TestTimestampCacheWithTxnID(t *testing.T) {
	defer leaktest.AfterTest(t)
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	tc := NewTimestampCache(clock)
	// Add two successive txn entries.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts1 := clock.Now()
	tc.Add(roachpb.Key("a"), roachpb.Key("c"), ts1, txn1ID, true)
	ts2 := clock.Now()
	// Add an overlapping entry ("b"-"d") from a second txn at a later time.
	tc.Add(roachpb.Key("b"), roachpb.Key("d"), ts2, txn2ID, true)
	// Fetching with no transaction gets latest value.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, nil); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "1" gets most recent.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, txn1ID); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "2" skips most recent.
	if ts, _ := tc.GetMax(roachpb.Key("b"), nil, txn2ID); !ts.Equal(ts1) {
		t.Errorf("expected %s; got %s", ts1, ts)
	}
}
Example 13: TestBuildEndpointListRemoveStagnantClocks
// TestBuildEndpointListRemoveStagnantClocks tests the side effect of removing
// older offsets when we build an endpoint list.
func TestBuildEndpointListRemoveStagnantClocks(t *testing.T) {
	defer leaktest.AfterTest(t)()
	offsets := map[string]RemoteOffset{
		"0":         {Offset: 0, Uncertainty: 10, MeasuredAt: 11},
		"stagnant0": {Offset: 1, Uncertainty: 10, MeasuredAt: 0},
		"1":         {Offset: 2, Uncertainty: 10, MeasuredAt: 20},
		"stagnant1": {Offset: 3, Uncertainty: 10, MeasuredAt: 9},
	}
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(5 * time.Nanosecond)
	remoteClocks := newRemoteClockMonitor(clock)
	// The stagnant offsets older than this will be removed.
	remoteClocks.monitorInterval = 10 * time.Nanosecond
	remoteClocks.mu.offsets = offsets
	remoteClocks.mu.lastMonitoredAt = time.Unix(0, 10) // offsets measured before this will be removed.
	remoteClocks.buildEndpointList()
	_, ok0 := offsets["stagnant0"]
	_, ok1 := offsets["stagnant1"]
	if ok0 || ok1 {
t.Errorf("expected stagant offsets removed, instead offsets: %v", offsets)
}
}
Example 14: TestScannerTiming
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
func TestScannerTiming(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	const runTime = 100 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	durations := []time.Duration{
		15 * time.Millisecond,
		25 * time.Millisecond,
	}
	for i, duration := range durations {
		util.SucceedsSoon(t, func() error {
			ranges := newTestRangeSet(count, t)
			q := &testQueue{}
			s := newReplicaScanner(duration, 0, ranges)
			s.AddQueues(q)
			mc := hlc.NewManualClock(0)
			clock := hlc.NewClock(mc.UnixNano)
			stopper := stop.NewStopper()
			s.Start(clock, stopper)
			time.Sleep(runTime)
			stopper.Stop()
			avg := s.avgScan()
			log.Infof("%d: average scan: %s", i, avg)
			if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
				duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
				return errors.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
			}
			return nil
		})
	}
}
Example 15: TestBuildEndpointListRemoveStagnantClocks
// TestBuildEndpointListRemoveStagnantClocks tests the side effect of removing
// older offsets when we build an endpoint list.
func TestBuildEndpointListRemoveStagnantClocks(t *testing.T) {
	offsets := map[string]proto.RemoteOffset{
		"0":         {Offset: 0, Error: 10, MeasuredAt: 11},
		"stagnant0": {Offset: 1, Error: 10, MeasuredAt: 0},
		"1":         {Offset: 2, Error: 10, MeasuredAt: 20},
		"stagnant1": {Offset: 3, Error: 10, MeasuredAt: 9},
	}
	// The stagnant offsets older than 10ns ago will be removed.
	monitorInterval = 10 * time.Nanosecond
	manual := hlc.NewManualClock(0)
	clock := hlc.NewClock(manual.UnixNano)
	clock.SetMaxOffset(5 * time.Nanosecond)
	remoteClocks := &RemoteClockMonitor{
		offsets:         offsets,
		lClock:          clock,
		lastMonitoredAt: 10, // offsets measured before this will be removed.
	}
	remoteClocks.buildEndpointList()
	_, ok0 := offsets["stagnant0"]
	_, ok1 := offsets["stagnant1"]
	if ok0 || ok1 {
		t.Errorf("expected stagnant offsets removed, instead offsets: %v", offsets)
	}
}