This article collects typical usage examples of the Golang function github.com/cockroachdb/cockroach/pkg/util/hlc.NewClock. If you are wondering what NewClock does, how to call it, or what it looks like in real code, the curated examples here should help.
The following shows 15 code examples of the NewClock function, sorted by popularity by default.
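Before the examples, here is a minimal, self-contained sketch of the pattern they all share: wrap a physical-time source in an HLC clock and read hybrid timestamps from it. It assumes the two-argument NewClock signature (time source plus maximum offset) used by most of the snippets below; older snippets pass only the time source and call SetMaxOffset separately. The package main wrapper and the printed output are only there to make the sketch runnable, not part of any CockroachDB test.

package main

import (
	"fmt"
	"time"

	"github.com/cockroachdb/cockroach/pkg/util/hlc"
)

func main() {
	// A manual clock lets the caller control physical time explicitly.
	manual := hlc.NewManualClock(123)

	// NewClock wraps a physical-time source (a func() int64 returning
	// nanoseconds); the second argument is the maximum tolerated clock
	// offset, as in most of the examples below.
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)

	// Now() returns a hybrid logical timestamp: a wall-clock component
	// plus a logical counter for ordering within the same wall time.
	ts := clock.Now()
	fmt.Printf("wall=%d logical=%d\n", ts.WallTime, ts.Logical)

	// Advancing the manual clock is reflected in later readings.
	manual.Increment(100)
	fmt.Printf("wall=%d\n", clock.Now().WallTime)
}

The manual clock is what lets the tests below assert exact wall-time values, such as a ServerTime of 5 or a MeasuredAt of 10.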
Example 1: TestOffsetMeasurement
func TestOffsetMeasurement(t *testing.T) {
	defer leaktest.AfterTest(t)()

	stopper := stop.NewStopper()
	defer stopper.Stop()

	serverTime := time.Unix(0, 20)
	serverClock := hlc.NewClock(serverTime.UnixNano)
	serverCtx := newNodeTestContext(serverClock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()

	RegisterHeartbeatServer(s, &HeartbeatService{
		clock:              serverClock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})

	// Create a client clock that is behind the server clock.
	clientAdvancing := AdvancingClock{time: time.Unix(0, 10)}
	clientClock := hlc.NewClock(clientAdvancing.UnixNano)
	clientClock.SetMaxOffset(time.Millisecond)
	clientCtx := newNodeTestContext(clientClock, stopper)
	clientCtx.RemoteClocks.offsetTTL = 5 * clientAdvancing.getAdvancementInterval()
	if _, err := clientCtx.GRPCDial(remoteAddr); err != nil {
		t.Fatal(err)
	}

	expectedOffset := RemoteOffset{Offset: 10, Uncertainty: 0, MeasuredAt: 10}
	util.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()

		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; !ok {
			return errors.Errorf("expected offset of %s to be initialized, but it was not", remoteAddr)
		} else if o != expectedOffset {
			return errors.Errorf("expected:\n%v\nactual:\n%v", expectedOffset, o)
		}
		return nil
	})

	// Change the client such that it receives a heartbeat right after the
	// maximum clock reading delay.
	clientAdvancing.setAdvancementInterval(
		maximumPingDurationMult*clientClock.MaxOffset() + 1*time.Nanosecond)

	util.SucceedsSoon(t, func() error {
		clientCtx.RemoteClocks.mu.Lock()
		defer clientCtx.RemoteClocks.mu.Unlock()

		if o, ok := clientCtx.RemoteClocks.mu.offsets[remoteAddr]; ok {
			return errors.Errorf("expected offset to have been cleared, but found %s", o)
		}
		return nil
	})
}
Example 2: TestClockOffsetMismatch
func TestClockOffsetMismatch(t *testing.T) {
	defer leaktest.AfterTest(t)()

	defer func() {
		if r := recover(); r != nil {
			fmt.Println(r)
			if match, _ := regexp.MatchString("locally configured maximum clock offset", r.(string)); !match {
				t.Errorf("expected clock mismatch error")
			}
		}
	}()

	clock := hlc.NewClock(hlc.UnixNano, 250*time.Millisecond)
	hs := &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: newRemoteClockMonitor(clock, time.Hour),
	}

	request := &PingRequest{
		Ping:           "testManual",
		Addr:           "test",
		MaxOffsetNanos: (500 * time.Millisecond).Nanoseconds(),
	}
	ctx := context.Background()
	_, _ = hs.Ping(ctx, request)

	t.Fatal("should not reach")
}
Example 3: TestAcquireAndRelease
func TestAcquireAndRelease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup(t)
	defer s.Stopper().Stop()

	ctx := context.Background()

	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	lm := client.NewLeaseManager(db, clock, client.LeaseManagerOptions{ClientID: clientID1})

	l, err := lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}
	if err := lm.ReleaseLease(ctx, l); err != nil {
		t.Fatal(err)
	}

	if err := lm.ReleaseLease(ctx, l); !testutils.IsError(err, "unexpected value") {
		t.Fatal(err)
	}

	l, err = lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}
	if err := lm.ReleaseLease(ctx, l); err != nil {
		t.Fatal(err)
	}
}
Example 4: TestTimestampSelectionInOptions
// TestTimestampSelectionInOptions verifies that a client can set the
// Txn timestamp using client.TxnExecOptions.
func TestTimestampSelectionInOptions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := NewDB(newTestSender(nil, nil))
	txn := NewTxn(context.Background(), *db)

	mc := hlc.NewManualClock(100)
	clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
	execOpt := TxnExecOptions{
		Clock: clock,
	}
	refTimestamp := clock.Now()

	txnClosure := func(txn *Txn, opt *TxnExecOptions) error {
		// Ensure the KV transaction is created.
		return txn.Put("a", "b")
	}

	if err := txn.Exec(execOpt, txnClosure); err != nil {
		t.Fatal(err)
	}

	// Check the timestamp was initialized.
	if txn.Proto.OrigTimestamp.WallTime != refTimestamp.WallTime {
		t.Errorf("expected txn orig ts to be %s; got %s", refTimestamp, txn.Proto.OrigTimestamp)
	}
}
Example 5: TestHeartbeatReply
func TestHeartbeatReply(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(5)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	heartbeat := &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: newRemoteClockMonitor(clock, time.Hour),
	}

	request := &PingRequest{
		Ping: "testPing",
	}
	response, err := heartbeat.Ping(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}

	if response.Pong != request.Ping {
		t.Errorf("expected %s to be equal to %s", response.Pong, request.Ping)
	}

	if response.ServerTime != 5 {
		t.Errorf("expected server time 5, instead %d", response.ServerTime)
	}
}
Example 6: TestTimestampCacheEqualTimestamps
// TestTimestampCacheEqualTimestamps verifies that in the event of two
// non-overlapping transactions with equal timestamps, the returned
// timestamp is not owned by either one.
func TestTimestampCacheEqualTimestamps(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	tc := newTimestampCache(clock)

	txn1 := uuid.MakeV4()
	txn2 := uuid.MakeV4()

	// Add two non-overlapping transactions at the same timestamp.
	ts1 := clock.Now()
	tc.add(roachpb.Key("a"), roachpb.Key("b"), ts1, &txn1, true)
	tc.add(roachpb.Key("b"), roachpb.Key("c"), ts1, &txn2, true)

	// When querying either side separately, the transaction ID is returned.
	if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("b")); !ts.Equal(ts1) {
		t.Errorf("expected 'a'-'b' to have timestamp %s, but found %s", ts1, ts)
	} else if *txn != txn1 {
		t.Errorf("expected 'a'-'b' to have txn id %s, but found %s", txn1, txn)
	}
	if ts, txn, _ := tc.GetMaxRead(roachpb.Key("b"), roachpb.Key("c")); !ts.Equal(ts1) {
		t.Errorf("expected 'b'-'c' to have timestamp %s, but found %s", ts1, ts)
	} else if *txn != txn2 {
		t.Errorf("expected 'b'-'c' to have txn id %s, but found %s", txn2, txn)
	}

	// Querying a span that overlaps both returns a nil txn ID; neither
	// can proceed here.
	if ts, txn, _ := tc.GetMaxRead(roachpb.Key("a"), roachpb.Key("c")); !ts.Equal(ts1) {
		t.Errorf("expected 'a'-'c' to have timestamp %s, but found %s", ts1, ts)
	} else if txn != nil {
		t.Errorf("expected 'a'-'c' to have nil txn id, but found %s", txn)
	}
}
Example 7: NewNetwork
// NewNetwork creates nodeCount gossip nodes.
func NewNetwork(stopper *stop.Stopper, nodeCount int, createResolvers bool) *Network {
	log.Infof(context.TODO(), "simulating gossip network with %d nodes", nodeCount)

	n := &Network{
		Nodes:   []*Node{},
		Stopper: stopper,
	}
	n.rpcContext = rpc.NewContext(
		log.AmbientContext{},
		&base.Config{Insecure: true},
		hlc.NewClock(hlc.UnixNano, time.Nanosecond),
		n.Stopper,
	)
	var err error
	n.tlsConfig, err = n.rpcContext.GetServerTLSConfig()
	if err != nil {
		log.Fatal(context.TODO(), err)
	}

	for i := 0; i < nodeCount; i++ {
		node, err := n.CreateNode()
		if err != nil {
			log.Fatal(context.TODO(), err)
		}
		// Build a resolver for each instance or we'll get data races.
		if createResolvers {
			r, err := resolver.NewResolverFromAddress(n.Nodes[0].Addr())
			if err != nil {
				log.Fatalf(context.TODO(), "bad gossip address %s: %s", n.Nodes[0].Addr(), err)
			}
			node.Gossip.SetResolvers([]resolver.Resolver{r})
		}
	}
	return n
}
Example 8: TestReacquireLease
func TestReacquireLease(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, db := setup(t)
	defer s.Stopper().Stop()

	ctx := context.Background()

	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, time.Nanosecond)
	lm := client.NewLeaseManager(db, clock, client.LeaseManagerOptions{ClientID: clientID1})
	l, err := lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}

	// We allow re-acquiring the same lease as long as the client ID is
	// the same to allow a client to reacquire its own leases rather than
	// having to wait them out if it crashes and restarts.
	l, err = lm.AcquireLease(ctx, leaseKey)
	if err != nil {
		t.Fatal(err)
	}
	if err := lm.ReleaseLease(ctx, l); err != nil {
		t.Fatal(err)
	}
}
Example 9: TestStoreRangeMergeStats
// TestStoreRangeMergeStats starts by splitting a range, then writing random data
// to both sides of the split. It then merges the ranges and verifies the merged
// range has stats consistent with recomputations.
func TestStoreRangeMergeStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	manual := hlc.NewManualClock(123)
	storeCfg := storage.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond))
	storeCfg.TestingKnobs.DisableSplitQueue = true
	store, stopper := createTestStoreWithConfig(t, storeCfg)
	defer stopper.Stop()

	// Split the range.
	aDesc, bDesc, pErr := createSplitRanges(store)
	if pErr != nil {
		t.Fatal(pErr)
	}

	// Write some values left and right of the proposed split key.
	writeRandomDataToRange(t, store, aDesc.RangeID, []byte("aaa"))
	writeRandomDataToRange(t, store, bDesc.RangeID, []byte("ccc"))

	// Get the range stats for both ranges now that we have data.
	snap := store.Engine().NewSnapshot()
	defer snap.Close()
	msA, err := engine.MVCCGetRangeStats(context.Background(), snap, aDesc.RangeID)
	if err != nil {
		t.Fatal(err)
	}
	msB, err := engine.MVCCGetRangeStats(context.Background(), snap, bDesc.RangeID)
	if err != nil {
		t.Fatal(err)
	}
	// Stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, aDesc, msA, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range A's stats before merge: %v", err)
	}
	if err := verifyRecomputedStats(snap, bDesc, msB, manual.UnixNano()); err != nil {
		t.Fatalf("failed to verify range B's stats before merge: %v", err)
	}
	manual.Increment(100)

	// Merge the b range back into the a range.
	args := adminMergeArgs(roachpb.KeyMin)
	if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil {
		t.Fatal(err)
	}
	replMerged := store.LookupReplica(aDesc.StartKey, nil)

	// Get the range stats for the merged range and verify.
	snap = store.Engine().NewSnapshot()
	defer snap.Close()
	msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, replMerged.RangeID)
	if err != nil {
		t.Fatal(err)
	}

	// Merged stats should agree with recomputation.
	if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, manual.UnixNano()); err != nil {
		t.Errorf("failed to verify range's stats after merge: %v", err)
	}
}
Example 10: TestTxnCoordSenderSingleRoundtripTxn
// TestTxnCoordSenderSingleRoundtripTxn checks that a batch which completely
// holds the writing portion of a Txn (including EndTransaction) does not
// launch a heartbeat goroutine at all.
func TestTxnCoordSenderSingleRoundtripTxn(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	manual := hlc.NewManualClock(123)
	clock := hlc.NewClock(manual.UnixNano, 20*time.Nanosecond)

	senderFunc := func(_ context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
		br := ba.CreateReply()
		txnClone := ba.Txn.Clone()
		br.Txn = &txnClone
		br.Txn.Writing = true
		return br, nil
	}
	ambient := log.AmbientContext{Tracer: tracing.NewTracer()}
	ts := NewTxnCoordSender(
		ambient, senderFn(senderFunc), clock, false, stopper, MakeTxnMetrics(metric.TestSampleInterval),
	)

	// Stop the stopper manually, prior to trying the transaction. This has the
	// effect of returning a NodeUnavailableError for any attempts at launching
	// a heartbeat goroutine.
	stopper.Stop()

	var ba roachpb.BatchRequest
	key := roachpb.Key("test")
	ba.Add(&roachpb.BeginTransactionRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.PutRequest{Span: roachpb.Span{Key: key}})
	ba.Add(&roachpb.EndTransactionRequest{})
	ba.Txn = &roachpb.Transaction{Name: "test"}
	_, pErr := ts.Send(context.Background(), ba)
	if pErr != nil {
		t.Fatal(pErr)
	}
}
Example 11: TestHeartbeatCB
func TestHeartbeatCB(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	clock := hlc.NewClock(time.Unix(0, 20).UnixNano, time.Nanosecond)
	serverCtx := newNodeTestContext(clock, stopper)
	s, ln := newTestServer(t, serverCtx, true)
	remoteAddr := ln.Addr().String()

	RegisterHeartbeatServer(s, &HeartbeatService{
		clock:              clock,
		remoteClockMonitor: serverCtx.RemoteClocks,
	})

	// Clocks don't matter in this test.
	clientCtx := newNodeTestContext(clock, stopper)

	var once sync.Once
	ch := make(chan struct{})

	clientCtx.HeartbeatCB = func() {
		once.Do(func() {
			close(ch)
		})
	}

	_, err := clientCtx.GRPCDial(remoteAddr)
	if err != nil {
		t.Fatal(err)
	}

	<-ch
}
Example 12: TestScannerTiming
// TestScannerTiming verifies that ranges are scanned, regardless
// of how many, to match scanInterval.
func TestScannerTiming(t *testing.T) {
	defer leaktest.AfterTest(t)()
	const count = 3
	const runTime = 100 * time.Millisecond
	const maxError = 7500 * time.Microsecond
	durations := []time.Duration{
		15 * time.Millisecond,
		25 * time.Millisecond,
	}

	for i, duration := range durations {
		testutils.SucceedsSoon(t, func() error {
			ranges := newTestRangeSet(count, t)
			q := &testQueue{}
			s := newReplicaScanner(log.AmbientContext{}, duration, 0, ranges)
			s.AddQueues(q)
			mc := hlc.NewManualClock(123)
			clock := hlc.NewClock(mc.UnixNano, time.Nanosecond)
			stopper := stop.NewStopper()
			s.Start(clock, stopper)
			time.Sleep(runTime)
			stopper.Stop()

			avg := s.avgScan()
			log.Infof(context.Background(), "%d: average scan: %s", i, avg)
			if avg.Nanoseconds()-duration.Nanoseconds() > maxError.Nanoseconds() ||
				duration.Nanoseconds()-avg.Nanoseconds() > maxError.Nanoseconds() {
				return errors.Errorf("expected %s, got %s: exceeds max error of %s", duration, avg, maxError)
			}
			return nil
		})
	}
}
Example 13: createTestNode
// createTestNode creates an rpc server using the specified address,
// gossip instance, KV database and a node using the specified slice
// of engines. The server, clock and node are returned. If gossipBS is
// not nil, the gossip bootstrap address is set to gossipBS.
func createTestNode(
	addr net.Addr, engines []engine.Engine, gossipBS net.Addr, t *testing.T,
) (*grpc.Server, net.Addr, *hlc.Clock, *Node, *stop.Stopper) {
	cfg := storage.StoreConfig{}

	stopper := stop.NewStopper()
	cfg.Clock = hlc.NewClock(hlc.UnixNano)
	nodeRPCContext := rpc.NewContext(log.AmbientContext{}, nodeTestBaseContext, cfg.Clock, stopper)
	cfg.ScanInterval = 10 * time.Hour
	cfg.ConsistencyCheckInterval = 10 * time.Hour
	grpcServer := rpc.NewServer(nodeRPCContext)
	serverCfg := makeTestConfig()
	cfg.Gossip = gossip.NewTest(
		0,
		nodeRPCContext,
		grpcServer,
		serverCfg.GossipBootstrapResolvers,
		stopper,
		metric.NewRegistry(),
	)
	ln, err := netutil.ListenAndServeGRPC(stopper, grpcServer, addr)
	if err != nil {
		t.Fatal(err)
	}
	if gossipBS != nil {
		// Handle possibility of a :0 port specification.
		if gossipBS.Network() == addr.Network() && gossipBS.String() == addr.String() {
			gossipBS = ln.Addr()
		}
		r, err := resolver.NewResolverFromAddress(gossipBS)
		if err != nil {
			t.Fatalf("bad gossip address %s: %s", gossipBS, err)
		}
		cfg.Gossip.SetResolvers([]resolver.Resolver{r})
		cfg.Gossip.Start(ln.Addr())
	}
	retryOpts := base.DefaultRetryOptions()
	retryOpts.Closer = stopper.ShouldQuiesce()
	distSender := kv.NewDistSender(kv.DistSenderConfig{
		Clock:           cfg.Clock,
		RPCContext:      nodeRPCContext,
		RPCRetryOptions: &retryOpts,
	}, cfg.Gossip)
	cfg.AmbientCtx.Tracer = tracing.NewTracer()
	sender := kv.NewTxnCoordSender(
		cfg.AmbientCtx,
		distSender,
		cfg.Clock,
		false,
		stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval),
	)
	cfg.DB = client.NewDB(sender)
	cfg.Transport = storage.NewDummyRaftTransport()
	cfg.MetricsSampleInterval = metric.TestSampleInterval
	node := NewNode(cfg, status.NewMetricsRecorder(cfg.Clock), metric.NewRegistry(), stopper,
		kv.MakeTxnMetrics(metric.TestSampleInterval), sql.MakeEventLogger(nil))
	roachpb.RegisterInternalServer(grpcServer, node)
	return grpcServer, ln.Addr(), cfg.Clock, node, stopper
}
Example 14: TestVerifyClockOffset
func TestVerifyClockOffset(t *testing.T) {
	defer leaktest.AfterTest(t)()

	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 50*time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)

	for idx, tc := range []struct {
		offsets       []RemoteOffset
		expectedError bool
	}{
		// no error if no offsets.
		{[]RemoteOffset{}, false},
		// no error when a majority of offsets are under the maximum tolerated offset.
		{[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 48, Uncertainty: 20}, {Offset: 61, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, false},
		// error when less than a majority of offsets are under the maximum tolerated offset.
		{[]RemoteOffset{{Offset: 20, Uncertainty: 10}, {Offset: 58, Uncertainty: 20}, {Offset: 85, Uncertainty: 25}, {Offset: 91, Uncertainty: 31}}, true},
	} {
		monitor.mu.offsets = make(map[string]RemoteOffset)
		for i, offset := range tc.offsets {
			monitor.mu.offsets[strconv.Itoa(i)] = offset
		}

		if tc.expectedError {
			if err := monitor.VerifyClockOffset(context.TODO()); !testutils.IsError(err, errOffsetGreaterThanMaxOffset) {
				t.Errorf("%d: unexpected error %v", idx, err)
			}
		} else {
			if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
				t.Errorf("%d: unexpected error %s", idx, err)
			}
		}
	}
}
Example 15: TestClockOffsetMetrics
func TestClockOffsetMetrics(t *testing.T) {
	defer leaktest.AfterTest(t)()
	stopper := stop.NewStopper()
	defer stopper.Stop()

	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 20*time.Nanosecond)
	monitor := newRemoteClockMonitor(clock, time.Hour)
	monitor.mu.offsets = map[string]RemoteOffset{
		"0": {
			Offset:      13,
			Uncertainty: 7,
			MeasuredAt:  6,
		},
	}

	if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
		t.Fatal(err)
	}

	if a, e := monitor.Metrics().ClockOffsetMeanNanos.Value(), int64(13); a != e {
		t.Errorf("mean %d != expected %d", a, e)
	}
	if a, e := monitor.Metrics().ClockOffsetStdDevNanos.Value(), int64(7); a != e {
		t.Errorf("stdDev %d != expected %d", a, e)
	}
}