本文整理汇总了Golang中github.com/cockroachdb/cockroach/util/hlc.NewClock函数的典型用法代码示例。如果您正苦于以下问题:Golang NewClock函数的具体用法?Golang NewClock怎么用?Golang NewClock使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewClock函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: startFakeServerGossip
// startFakeServerGossip creates a local gossip instance plus a faked
// remote gossip server. The remote side runs only the fake gossip
// service, so tests can inspect the messages a client sends to it.
func startFakeServerGossip(t *testing.T) (local *Gossip, remote *fakeGossipServer, stopper *stop.Stopper) {
	stopper = stop.NewStopper()

	// Local side: a real gossip instance backed by its own RPC server.
	localClock := hlc.NewClock(hlc.UnixNano)
	localCtx := rpc.NewContext(&base.Context{Insecure: true}, localClock, stopper)
	localAddr := util.CreateTestAddr("tcp")
	localServer := rpc.NewServer(localAddr, localCtx)
	if err := localServer.Start(); err != nil {
		t.Fatal(err)
	}
	local = New(localCtx, TestBootstrap)
	local.start(localServer, stopper)

	// Remote side: an RPC server hosting only the fake gossip service.
	remoteClock := hlc.NewClock(hlc.UnixNano)
	remoteAddr := util.CreateTestAddr("tcp")
	remoteCtx := rpc.NewContext(&base.Context{Insecure: true}, remoteClock, stopper)
	remoteServer := rpc.NewServer(remoteAddr, remoteCtx)
	if err := remoteServer.Start(); err != nil {
		t.Fatal(err)
	}
	var err error
	if remote, err = newFakeGossipServer(remoteServer, stopper); err != nil {
		t.Fatal(err)
	}
	addr := remoteServer.Addr()
	remote.nodeAddr = util.MakeUnresolvedAddr(addr.Network(), addr.String())
	// Give the gossip loops a moment to spin up before returning.
	time.Sleep(time.Millisecond)
	return local, remote, stopper
}
示例2: createTestStoreWithoutStart
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	// Install a fake zone config handler for the test.
	config.TestingSetupZoneConfigHook(stopper)

	rpcCtx := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	storeCtx := TestStoreContext
	storeCtx.Gossip = gossip.New(rpcCtx, gossip.TestInterval, gossip.TestBootstrap)
	storeCtx.StorePool = NewStorePool(storeCtx.Gossip, TestTimeUntilStoreDeadOff, stopper)

	// A manual clock lets tests control the store's notion of time.
	manualClock := hlc.NewManualClock(0)
	storeCtx.Clock = hlc.NewClock(manualClock.UnixNano)

	eng := engine.NewInMem(roachpb.Attributes{}, 10<<20, stopper)
	storeCtx.Transport = multiraft.NewLocalRPCTransport(stopper)
	stopper.AddCloser(storeCtx.Transport)

	// The test sender routes requests straight back to the store.
	sender := &testSender{}
	storeCtx.DB = client.NewDB(sender)
	store := NewStore(storeCtx, eng, &roachpb.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(roachpb.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(nil); err != nil {
		t.Fatal(err)
	}
	return store, manualClock, stopper
}
示例3: createTestStoreWithoutStart
// createTestStoreWithoutStart creates a test store using an in-memory
// engine without starting the store. It returns the store, the store
// clock's manual unix nanos time and a stopper. The caller is
// responsible for stopping the stopper upon completion.
func createTestStoreWithoutStart(t *testing.T) (*Store, *hlc.ManualClock, *stop.Stopper) {
	stopper := stop.NewStopper()
	rpcCtx := rpc.NewContext(rootTestBaseContext, hlc.NewClock(hlc.UnixNano), stopper)
	storeCtx := TestStoreContext
	storeCtx.Gossip = gossip.New(rpcCtx, gossip.TestInterval, gossip.TestBootstrap)

	// A manual clock lets tests control the store's notion of time.
	manualClock := hlc.NewManualClock(0)
	storeCtx.Clock = hlc.NewClock(manualClock.UnixNano)

	eng := engine.NewInMem(proto.Attributes{}, 10<<20)
	storeCtx.Transport = multiraft.NewLocalRPCTransport()
	stopper.AddCloser(storeCtx.Transport)

	// The test sender routes requests straight back to the store.
	sender := &testSender{}
	var err error
	if storeCtx.DB, err = client.Open("//[email protected]", client.SenderOpt(sender)); err != nil {
		t.Fatal(err)
	}
	store := NewStore(storeCtx, eng, &proto.NodeDescriptor{NodeID: 1})
	sender.store = store
	if err := store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, stopper); err != nil {
		t.Fatal(err)
	}
	if err := store.BootstrapRange(); err != nil {
		t.Fatal(err)
	}
	return store, manualClock, stopper
}
示例4: TestOffsetMeasurement
// TestOffsetMeasurement verifies that the client measures its clock
// offset against the server via heartbeats, and that the recorded
// offset is cleared once heartbeats start taking longer than the
// maximum allowed clock reading delay.
func TestOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	// The server's clock reads a fixed 20ns.
	serverTime := time.Unix(0, 20)
	serverClock := hlc.NewClock(serverTime.UnixNano)
	serverCtx := newNodeTestContext(serverClock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()
	RegisterHeartbeatServer(s, &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})

	// Create a client clock that is behind the server clock.
	clientAdvancing := AdvancingClock{time: time.Unix(0, 10)}
	clientClock := hlc.NewClock(clientAdvancing.UnixNano)
	clientClock.SetMaxOffset(time.Millisecond)
	clientCtx := newNodeTestContext(clientClock, stopper)
	// Let measured offsets expire quickly relative to the advancing clock.
	clientCtx.RemoteClocks.offsetTTL = 5 * clientAdvancing.advancementInterval
	if _, err := clientCtx.GRPCDial(remoteAddr); err != nil {
		t.Fatal(err)
	}

	// Client at 10ns vs. server at 20ns => an offset of 10 measured at 10.
	expectedOffset := RemoteOffset{Offset: 10, Uncertainty: 0, MeasuredAt: 10}
	util.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()
		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; !ok {
			return util.Errorf("expected offset of %s to be initialized, but it was not", remoteAddr)
		} else if o != expectedOffset {
			return util.Errorf("expected:\n%v\nactual:\n%v", expectedOffset, o)
		}
		return nil
	})

	// Change the client such that it receives a heartbeat right after the
	// maximum clock reading delay; such measurements are too imprecise,
	// so the stored offset should eventually be dropped.
	clientAdvancing.Lock()
	clientAdvancing.advancementInterval = maximumPingDurationMult*clientClock.MaxOffset() + 1*time.Nanosecond
	clientAdvancing.Unlock()
	util.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()
		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; ok {
			return util.Errorf("expected offset to have been cleared, but found %s", o)
		}
		return nil
	})
}
示例5: startGossip
// startGossip creates local and remote gossip instances.
// Both the remote and the local instance launch the gossip service.
func startGossip(t *testing.T) (local, remote *Gossip, stopper *stop.Stopper) {
	stopper = stop.NewStopper()

	// Local instance: RPC context, server, listener, node descriptor.
	localClock := hlc.NewClock(hlc.UnixNano)
	localCtx := rpc.NewContext(&base.Context{Insecure: true}, localClock, stopper)
	localAddr := util.CreateTestAddr("tcp")
	localServer := rpc.NewServer(localCtx)
	localTLS, err := localCtx.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	localListener, err := util.ListenAndServe(stopper, localServer, localAddr, localTLS)
	if err != nil {
		t.Fatal(err)
	}
	local = New(localCtx, TestBootstrap)
	local.SetNodeID(1)
	if err := local.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  1,
		Address: util.MakeUnresolvedAddr(localAddr.Network(), localAddr.String()),
	}); err != nil {
		t.Fatal(err)
	}

	// Remote instance: mirror of the local setup under node ID 2.
	remoteClock := hlc.NewClock(hlc.UnixNano)
	remoteCtx := rpc.NewContext(&base.Context{Insecure: true}, remoteClock, stopper)
	remoteAddr := util.CreateTestAddr("tcp")
	remoteServer := rpc.NewServer(remoteCtx)
	remoteTLS, err := remoteCtx.GetServerTLSConfig()
	if err != nil {
		t.Fatal(err)
	}
	remoteListener, err := util.ListenAndServe(stopper, remoteServer, remoteAddr, remoteTLS)
	if err != nil {
		t.Fatal(err)
	}
	remote = New(remoteCtx, TestBootstrap)
	remote.SetNodeID(2)
	if err := remote.SetNodeDescriptor(&roachpb.NodeDescriptor{
		NodeID:  2,
		Address: util.MakeUnresolvedAddr(remoteAddr.Network(), remoteAddr.String()),
	}); err != nil {
		t.Fatal(err)
	}

	// Launch both gossip loops and give them a moment to spin up.
	local.start(localServer, localListener.Addr(), stopper)
	remote.start(remoteServer, remoteListener.Addr(), stopper)
	time.Sleep(time.Millisecond)
	return local, remote, stopper
}
示例6: TestDelayedOffsetMeasurement
// TestDelayedOffsetMeasurement tests that the client will record a
// zero offset if the heartbeat reply exceeds the
// maximumClockReadingDelay, but not the heartbeat timeout.
func TestDelayedOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	defer stopper.Stop()

	// The server's clock is pinned at 10ns via a manual clock.
	serverManual := hlc.NewManualClock(10)
	serverClock := hlc.NewClock(serverManual.UnixNano)
	s := createTestServer(serverClock, stopper, t)
	heartbeat := &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: newRemoteClockMonitor(serverClock),
	}
	if err := s.RegisterName("Heartbeat", heartbeat); err != nil {
		t.Fatalf("Unable to register heartbeat service: %s", err)
	}

	// Create a client that receives a heartbeat right after the
	// maximumClockReadingDelay: each clock read advances time just past
	// the threshold, so every measurement is considered too imprecise.
	advancing := AdvancingClock{
		time:                0,
		advancementInterval: maximumClockReadingDelay.Nanoseconds() + 1,
	}
	clientClock := hlc.NewClock(advancing.UnixNano)
	context := NewServerTestContext(clientClock, stopper)
	c := NewClient(s.Addr(), nil, context)
	<-c.Ready
	// Ensure we get a good heartbeat before continuing.
	if err := util.IsTrueWithin(c.IsHealthy, heartbeatInterval*10); err != nil {
		t.Fatal(err)
	}

	// Since the reply took too long, we should have a zero offset, even
	// though the client is still healthy because it received a heartbeat
	// reply.
	if o := c.RemoteOffset(); !o.Equal(proto.RemoteOffset{}) {
		t.Errorf("expected offset %v, actual %v", proto.RemoteOffset{}, o)
	}

	// Ensure the general offsets map was updated properly too: no entry
	// should have been recorded for this client's address.
	context.RemoteClocks.mu.Lock()
	if o, ok := context.RemoteClocks.offsets[c.addr.String()]; ok {
		t.Errorf("expected offset to not exist, but found %v", o)
	}
	context.RemoteClocks.mu.Unlock()
}
示例7: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manualClock := hlc.NewManualClock(0)
	clock := hlc.NewClock(manualClock.UnixNano)
	clock.SetMaxOffset(20)

	// The sender simply echoes back an empty reply for the batch.
	sender := senderFn(func(_ context.Context, ba proto.BatchRequest) (*proto.BatchResponse, *proto.Error) {
		return ba.CreateReply().(*proto.BatchResponse), nil
	})
	ts := NewTxnCoordSender(sender, clock, false, nil, stopper)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba proto.BatchRequest
	put := &proto.PutRequest{}
	put.Key = proto.Key("test")
	ba.Add(put)
	ba.Add(&proto.EndTransactionRequest{})
	ba.Txn = &proto.Transaction{Name: "test"}
	if _, pErr := ts.Send(context.Background(), ba); pErr != nil {
		t.Fatal(pErr)
	}
}
示例8: TestTimestampCacheReadVsWrite
// TestTimestampCacheReadVsWrite verifies that the timestamp cache
// can differentiate between read and write timestamp.
func TestTimestampCacheReadVsWrite(t *testing.T) {
	defer leaktest.AfterTest(t)
	manualClock := hlc.NewManualClock(0)
	clock := hlc.NewClock(manualClock.UnixNano)
	cache := NewTimestampCache(clock)

	keyA, keyB := roachpb.Key("a"), roachpb.Key("b")

	// Add a read-only non-txn entry at the current time.
	ts1 := clock.Now()
	cache.Add(keyA, keyB, ts1, nil, true)

	// Add two successive txn entries; one read-only and one read-write.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts2 := clock.Now()
	cache.Add(keyA, nil, ts2, txn1ID, true)
	ts3 := clock.Now()
	cache.Add(keyA, nil, ts3, txn2ID, false)

	// Fetching with no transaction gets the latest values.
	if rTS, wTS := cache.GetMax(keyA, nil, nil); !rTS.Equal(ts2) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts2, ts3, rTS, wTS)
	}
	// Fetching with txn ID "1" gets original for read and most recent for write.
	if rTS, wTS := cache.GetMax(keyA, nil, txn1ID); !rTS.Equal(ts1) || !wTS.Equal(ts3) {
		t.Errorf("expected %s %s; got %s %s", ts1, ts3, rTS, wTS)
	}
	// Fetching with txn ID "2" gets ts2 for read and low water mark for write.
	if rTS, wTS := cache.GetMax(keyA, nil, txn2ID); !rTS.Equal(ts2) || !wTS.Equal(cache.lowWater) {
		t.Errorf("expected %s %s; got %s %s", ts2, cache.lowWater, rTS, wTS)
	}
}
示例9: TestTimestampCacheWithTxnID
// TestTimestampCacheWithTxnID verifies that timestamps matching
// the specified txn ID are ignored.
func TestTimestampCacheWithTxnID(t *testing.T) {
	defer leaktest.AfterTest(t)
	manualClock := hlc.NewManualClock(0)
	clock := hlc.NewClock(manualClock.UnixNano)
	cache := NewTimestampCache(clock)

	// Add two successive txn entries.
	txn1ID := uuid.NewUUID4()
	txn2ID := uuid.NewUUID4()
	ts1 := clock.Now()
	cache.Add(roachpb.Key("a"), roachpb.Key("c"), ts1, txn1ID, true)
	ts2 := clock.Now()
	// This entry will remove "a"-"b" from the cache.
	cache.Add(roachpb.Key("b"), roachpb.Key("d"), ts2, txn2ID, true)

	keyB := roachpb.Key("b")
	// Fetching with no transaction gets the latest value.
	if ts, _ := cache.GetMax(keyB, nil, nil); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "1" gets the most recent entry.
	if ts, _ := cache.GetMax(keyB, nil, txn1ID); !ts.Equal(ts2) {
		t.Errorf("expected %s; got %s", ts2, ts)
	}
	// Fetching with txn ID "2" skips the most recent entry.
	if ts, _ := cache.GetMax(keyB, nil, txn2ID); !ts.Equal(ts1) {
		t.Errorf("expected %s; got %s", ts1, ts)
	}
}
示例10: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	// A manual clock lets tests control time; it backs the cluster clock.
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewRootTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestInterval, gossip.TestBootstrap)
	// 50 MiB in-memory engine backs the single store.
	ltc.Eng = engine.NewInMem(proto.Attributes{}, 50<<20)

	// Wire the KV client through a retryable local sender and a txn
	// coordinator so transactions work against the local store.
	ltc.lSender = newRetryableLocalSender(NewLocalSender())
	ltc.Sender = NewTxnCoordSender(ltc.lSender, ltc.Clock, false, nil, ltc.Stopper)
	var err error
	if ltc.DB, err = client.Open("//[email protected]", client.SenderOpt(ltc.Sender)); err != nil {
		t.Fatal(err)
	}

	transport := multiraft.NewLocalRPCTransport(ltc.Stopper)
	ltc.Stopper.AddCloser(transport)
	ctx := storage.TestStoreContext
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ltc.Store = storage.NewStore(ctx, ltc.Eng, &proto.NodeDescriptor{NodeID: 1})
	// Bootstrap the store ident and first range, then start the store.
	if err := ltc.Store.Bootstrap(proto.StoreIdent{NodeID: 1, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.lSender.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
}
示例11: TestStoresVisitStores
// TestStoresVisitStores verifies that VisitStores visits every added
// store exactly once and propagates errors returned by the visitor.
func TestStoresVisitStores(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ls := NewStores(hlc.NewClock(hlc.UnixNano))
	numStores := 10
	for i := 0; i < numStores; i++ {
		ls.AddStore(&Store{Ident: roachpb.StoreIdent{StoreID: roachpb.StoreID(i)}})
	}

	// Visit all stores, recording which StoreIDs were seen.
	visit := make([]bool, numStores)
	// %s formats an error directly; calling err.Error() is redundant.
	err := ls.VisitStores(func(s *Store) error { visit[s.Ident.StoreID] = true; return nil })
	if err != nil {
		t.Errorf("unexpected error on visit: %s", err)
	}
	for i, visited := range visit {
		if !visited {
			t.Errorf("store %d was not visited", i)
		}
	}

	// A visitor error must be propagated back to the caller. Use a
	// non-empty message; errors.New("") produces an unreadable error.
	if err := ls.VisitStores(func(s *Store) error { return errors.New("visit error") }); err == nil {
		t.Errorf("expected visit error")
	}
}
示例12: TestHeartbeatReply
// TestHeartbeatReply verifies that a Ping is echoed back in the Pong
// field and that the reply carries the server's clock reading.
func TestHeartbeatReply(t *testing.T) {
	defer leaktest.AfterTest(t)
	manualClock := hlc.NewManualClock(5)
	serverClock := hlc.NewClock(manualClock.UnixNano)
	heartbeat := &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: newRemoteClockMonitor(serverClock),
	}

	request := &PingRequest{Ping: "testPing"}
	responseI, err := heartbeat.Ping(request)
	if err != nil {
		t.Fatal(err)
	}
	response := responseI.(*PingResponse)

	if response.Pong != request.Ping {
		t.Errorf("expected %s to be equal to %s", response.Pong, request.Ping)
	}
	// The manual clock was seeded with 5, so that is the server time.
	if response.ServerTime != 5 {
		t.Errorf("expected server time 5, instead %d", response.ServerTime)
	}
}
示例13: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := stop.NewStopper()
	manualClock := hlc.NewManualClock(0)
	clock := hlc.NewClock(manualClock.UnixNano)
	clock.SetMaxOffset(20)

	// The sender echoes the batch back with its txn marked as writing.
	sender := senderFn(func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		br := ba.CreateReply()
		br.Txn = ba.Txn.Clone()
		br.Txn.Writing = true
		return br, nil
	})
	ts := NewTxnCoordSender(sender, clock, false, nil, stopper)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	if _, pErr := ts.Send(context.Background(), ba); pErr != nil {
		t.Fatal(pErr)
	}
}
示例14: TestMultiRangeScanReverseScanInconsistent
// TestMultiRangeScanReverseScanInconsistent verifies that a Scan/ReverseScan
// across ranges that doesn't require read consistency will set a timestamp
// using the clock local to the distributed sender.
func TestMultiRangeScanReverseScanInconsistent(t *testing.T) {
	defer leaktest.AfterTest(t)
	s, db := setupMultipleRanges(t, "b")
	defer s.Stop()

	// Write keys "a" and "b", the latter of which is the first key in the
	// second range.
	keys := []string{"a", "b"}
	ts := []time.Time{}
	for i, key := range keys {
		b := &client.Batch{}
		b.Put(key, "value")
		if err := db.Run(b); err != nil {
			t.Fatal(err)
		}
		// Record each write's commit timestamp for clock manipulation below.
		ts = append(ts, b.Results[0].Rows[0].Timestamp())
		log.Infof("%d: %s", i, b.Results[0].Rows[0].Timestamp())
	}

	// Do an inconsistent Scan/ReverseScan from a new DistSender and verify
	// it does the read at its local clock and doesn't receive an
	// OpRequiresTxnError. We set the local clock to just below the
	// timestamp of the second key ("b"), so only key "a" should be
	// visible to reads performed at the sender's clock.
	manual := hlc.NewManualClock(ts[1].UnixNano() - 1)
	clock := hlc.NewClock(manual.UnixNano)
	ds := kv.NewDistSender(&kv.DistSenderContext{Clock: clock}, s.Gossip())

	// Scan: expect exactly the single row "a".
	sa := roachpb.NewScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ScanRequest)
	reply, err := client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, sa)
	if err != nil {
		t.Fatal(err)
	}
	sr := reply.(*roachpb.ScanResponse)
	if l := len(sr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(sr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}

	// ReverseScan: likewise only "a" should be returned.
	rsa := roachpb.NewReverseScan(roachpb.Key("a"), roachpb.Key("c"), 0).(*roachpb.ReverseScanRequest)
	reply, err = client.SendWrappedWith(ds, nil, roachpb.BatchRequest_Header{
		ReadConsistency: roachpb.INCONSISTENT,
	}, rsa)
	if err != nil {
		t.Fatal(err)
	}
	rsr := reply.(*roachpb.ReverseScanResponse)
	if l := len(rsr.Rows); l != 1 {
		t.Fatalf("expected 1 row; got %d", l)
	}
	if key := string(rsr.Rows[0].Key); keys[0] != key {
		t.Errorf("expected key %q; got %q", keys[0], key)
	}
}
示例15: gossipForTest
// gossipForTest creates a gossip instance wired to a fresh stopper and
// blocks until an (empty) system config has been gossiped.
func gossipForTest(t *testing.T) (*gossip.Gossip, *stop.Stopper) {
	stopper := stop.NewStopper()
	// Install a fake zone config handler for the test.
	config.TestingSetupZoneConfigHook(stopper)
	rpcCtx := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), stopper)
	g := gossip.New(rpcCtx, gossip.TestBootstrap, stopper)
	// SetNodeID must be called before AddInfo.
	g.SetNodeID(roachpb.NodeID(1))

	// Seed gossip with an empty system config.
	emptyCfg := &config.SystemConfig{}
	if err := g.AddInfoProto(gossip.KeySystemConfig, emptyCfg, 0); err != nil {
		t.Fatal(err)
	}
	// Wait for the system config to become visible.
	util.SucceedsSoon(t, func() error {
		if g.GetSystemConfig() == nil {
			return util.Errorf("expected non-nil system config")
		}
		return nil
	})
	return g, stopper
}