This article collects typical usage examples of the Timestamp.Less method from the Golang package github.com/cockroachdb/cockroach/roachpb. If you are wondering what Timestamp.Less does, how to use it, or where to find examples of it, the curated code samples below should help. You can also explore the other methods of the github.com/cockroachdb/cockroach/roachpb.Timestamp type.
Eleven code examples of Timestamp.Less are shown below, sorted by popularity by default.
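Before diving into the examples, it helps to know what Less actually compares. In this era of CockroachDB, roachpb.Timestamp is a hybrid-logical-clock value with a WallTime component (nanoseconds since the Unix epoch) and a Logical tiebreaker, and Less orders timestamps by wall time first and by the logical component second. Below is a minimal, self-contained sketch of those semantics; the timestamp type is an illustrative stand-in, not the real roachpb.Timestamp.

package main

import "fmt"

// timestamp is a hypothetical stand-in mirroring the two fields of
// roachpb.Timestamp.
type timestamp struct {
	WallTime int64 // wall clock time in nanoseconds since the Unix epoch
	Logical  int32 // tiebreaker for events within the same nanosecond
}

// less reports whether t is strictly earlier than s: wall time first,
// logical component as the tiebreaker.
func (t timestamp) less(s timestamp) bool {
	return t.WallTime < s.WallTime ||
		(t.WallTime == s.WallTime && t.Logical < s.Logical)
}

func main() {
	a := timestamp{WallTime: 100, Logical: 0}
	b := timestamp{WallTime: 100, Logical: 1}
	fmt.Println(a.less(b)) // true: equal wall time, smaller logical component
	fmt.Println(b.less(a)) // false
	fmt.Println(a.less(a)) // false: less is strict
}

Because Less is strict, the negated form !a.Less(b) reads as "b is at or before a", a pattern several of the garbage-collection examples below rely on.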
Example 1: TestTxnCoordSenderHeartbeat
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)
	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond
	initialTxn := client.NewTxn(*s.DB)
	if err := initialTxn.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		util.SucceedsSoon(t, func() error {
			ok, txn, pErr := getTxn(s.Sender, &initialTxn.Proto)
			if !ok || pErr != nil {
				t.Fatalf("got txn: %t: %s", ok, pErr)
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			s.Sender.Lock()
			s.Manual.Increment(1)
			s.Sender.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return nil
			}
			return util.Errorf("expected heartbeat")
		})
	}
}
Example 2: processSequenceCache
// processSequenceCache iterates through the local sequence cache entries,
// pushing the transactions (in cleanup mode) for those entries which appear
// to be old enough. In case the transaction indicates that it's terminated,
// the sequence cache keys are included in the result.
func processSequenceCache(snap engine.Engine, rangeID roachpb.RangeID, now, cutoff roachpb.Timestamp, prevTxns map[uuid.UUID]*roachpb.Transaction, infoMu *lockableGCInfo, pushTxn pushFunc) []roachpb.GCRequest_GCKey {
	txns := make(map[uuid.UUID]*roachpb.Transaction)
	idToKeys := make(map[uuid.UUID][]roachpb.GCRequest_GCKey)
	seqCache := NewSequenceCache(rangeID)
	infoMu.Lock()
	seqCache.Iterate(snap, func(key []byte, txnIDPtr *uuid.UUID, v roachpb.SequenceCacheEntry) {
		txnID := *txnIDPtr
		// If we've pushed this Txn previously, attempt cleanup (in case the
		// push was successful). Initiate new pushes only for newly discovered
		// "old" entries.
		infoMu.SequenceSpanTotal++
		if prevTxn, ok := prevTxns[txnID]; ok && prevTxn.Status != roachpb.PENDING {
			txns[txnID] = prevTxn
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		} else if !cutoff.Less(v.Timestamp) {
			infoMu.SequenceSpanConsidered++
			txns[txnID] = &roachpb.Transaction{
				TxnMeta: roachpb.TxnMeta{ID: txnIDPtr, Key: v.Key},
				Status:  roachpb.PENDING,
			}
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		}
	})
	infoMu.Unlock()
	var wg sync.WaitGroup
	// TODO(tschottdorf): use stopper.LimitedAsyncTask.
	wg.Add(len(txns))
	for _, txn := range txns {
		if txn.Status != roachpb.PENDING {
			wg.Done()
			continue
		}
		// Check if the Txn is still alive. If this indicates that the Txn is
		// aborted and old enough to guarantee that any running coordinator
		// would have realized that the transaction wasn't running by means
		// of a heartbeat, then we're free to remove the sequence cache entry.
		// In the most likely case, there isn't even an entry (which will
		// be apparent by a zero timestamp and nil last heartbeat).
		go pushTxn(now, txn, roachpb.PUSH_TOUCH, &wg)
	}
	wg.Wait()
	var gcKeys []roachpb.GCRequest_GCKey
	for txnID, txn := range txns {
		if txn.Status == roachpb.PENDING {
			continue
		}
		ts := txn.Timestamp
		if txn.LastHeartbeat != nil {
			ts.Forward(*txn.LastHeartbeat)
		}
		if !cutoff.Less(ts) {
			// This is it, we can delete our sequence cache entries.
			gcKeys = append(gcKeys, idToKeys[txnID]...)
			infoMu.SequenceSpanGCNum++
		}
	}
	return gcKeys
}
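A detail worth calling out in the loops above: since Less is strict, !cutoff.Less(ts) is true exactly when ts <= cutoff, i.e. when the transaction's timestamp, forwarded to its last heartbeat if one exists, does not lie beyond the GC cutoff. Here is a small sketch of that predicate using the same hypothetical timestamp stand-in as in the first sketch; gcEligible and forward are illustrative names, not CockroachDB APIs.

package main

import "fmt"

type timestamp struct {
	WallTime int64
	Logical  int32
}

func (t timestamp) less(s timestamp) bool {
	return t.WallTime < s.WallTime ||
		(t.WallTime == s.WallTime && t.Logical < s.Logical)
}

// forward advances t to s if s is later, mirroring Timestamp.Forward.
func (t *timestamp) forward(s timestamp) {
	if t.less(s) {
		*t = s
	}
}

// gcEligible mirrors the final loop of processSequenceCache: forward the
// transaction timestamp to its last heartbeat (if any), then treat the
// entry as collectable only if it does not lie beyond the cutoff.
func gcEligible(cutoff, txnTS timestamp, lastHeartbeat *timestamp) bool {
	ts := txnTS
	if lastHeartbeat != nil {
		ts.forward(*lastHeartbeat)
	}
	return !cutoff.less(ts) // ts <= cutoff
}

func main() {
	cutoff := timestamp{WallTime: 1000}
	stale := timestamp{WallTime: 900}
	recent := timestamp{WallTime: 1100}
	fmt.Println(gcEligible(cutoff, stale, nil))     // true: at or before the cutoff
	fmt.Println(gcEligible(cutoff, stale, &recent)) // false: a late heartbeat keeps it alive
}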
Example 3: Add
// Add the specified timestamp to the cache as covering the range of
// keys from start to end. If end is nil, the range covers the start
// key only. txnID is nil for no transaction. readOnly specifies
// whether the command adding this timestamp was read-only or not.
func (tc *TimestampCache) Add(start, end roachpb.Key, timestamp roachpb.Timestamp, txnID []byte, readOnly bool) {
	// This gives us a memory-efficient end key if end is empty.
	if len(end) == 0 {
		end = start.Next()
		start = end[:len(start)]
	}
	if tc.latest.Less(timestamp) {
		tc.latest = timestamp
	}
	// Only add to the cache if the timestamp is more recent than the
	// low water mark.
	if tc.lowWater.Less(timestamp) {
		// Check existing, overlapping entries. Remove superseded
		// entries or return without adding this entry if necessary.
		key := tc.cache.NewKey(start, end)
		for _, o := range tc.cache.GetOverlaps(start, end) {
			ce := o.Value.(cacheEntry)
			if ce.readOnly != readOnly {
				continue
			}
			if o.Key.Contains(key) && !ce.timestamp.Less(timestamp) {
				return // don't add this key; there's already a cache entry with >= timestamp.
			} else if key.Contains(o.Key) && !timestamp.Less(ce.timestamp) {
				tc.cache.Del(o.Key) // delete existing key; this cache entry supersedes.
			}
		}
		ce := cacheEntry{timestamp: timestamp, txnID: txnID, readOnly: readOnly}
		tc.cache.Add(key, ce)
	}
}
Example 4: UpdateDeadlineMaybe
// UpdateDeadlineMaybe sets the transaction's deadline to the lower of the
// current one (if any) and the passed value.
func (txn *Txn) UpdateDeadlineMaybe(deadline roachpb.Timestamp) bool {
	if txn.deadline == nil || deadline.Less(*txn.deadline) {
		txn.deadline = &deadline
		return true
	}
	return false
}
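Since the deadline is only replaced when the new value is strictly earlier, repeated calls can only ratchet the deadline downward. A hypothetical, self-contained sketch of that behavior follows; the txn type and updateDeadlineMaybe are stand-ins for illustration, not client.Txn itself.

package main

import "fmt"

type timestamp struct {
	WallTime int64
	Logical  int32
}

func (t timestamp) less(s timestamp) bool {
	return t.WallTime < s.WallTime ||
		(t.WallTime == s.WallTime && t.Logical < s.Logical)
}

// txn carries an optional deadline, loosely mirroring client.Txn.
type txn struct {
	deadline *timestamp
}

// updateDeadlineMaybe lowers the deadline and reports whether it changed,
// following the same ratchet as UpdateDeadlineMaybe above.
func (t *txn) updateDeadlineMaybe(d timestamp) bool {
	if t.deadline == nil || d.less(*t.deadline) {
		t.deadline = &d
		return true
	}
	return false
}

func main() {
	var t txn
	fmt.Println(t.updateDeadlineMaybe(timestamp{WallTime: 500})) // true: no deadline yet
	fmt.Println(t.updateDeadlineMaybe(timestamp{WallTime: 800})) // false: later than current
	fmt.Println(t.updateDeadlineMaybe(timestamp{WallTime: 300})) // true: earlier deadline wins
}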
Example 5: processSequenceCache
// processSequenceCache iterates through the local sequence cache entries,
// pushing the transactions (in cleanup mode) for those entries which appear
// to be old enough. In case the transaction indicates that it's terminated,
// the sequence cache keys are included in the result.
func (gcq *gcQueue) processSequenceCache(r *Replica, now, cutoff roachpb.Timestamp, prevTxns map[uuid.UUID]*roachpb.Transaction) []roachpb.GCRequest_GCKey {
	snap := r.store.Engine().NewSnapshot()
	defer snap.Close()
	txns := make(map[uuid.UUID]*roachpb.Transaction)
	idToKeys := make(map[uuid.UUID][]roachpb.GCRequest_GCKey)
	r.sequence.Iterate(snap, func(key []byte, txnIDPtr *uuid.UUID, v roachpb.SequenceCacheEntry) {
		txnID := *txnIDPtr
		// If we've pushed this Txn previously, attempt cleanup (in case the
		// push was successful). Initiate new pushes only for newly discovered
		// "old" entries.
		if prevTxn, ok := prevTxns[txnID]; ok && prevTxn.Status != roachpb.PENDING {
			txns[txnID] = prevTxn
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		} else if !cutoff.Less(v.Timestamp) {
			txns[txnID] = &roachpb.Transaction{
				TxnMeta: roachpb.TxnMeta{ID: txnIDPtr, Key: v.Key},
				Status:  roachpb.PENDING,
			}
			idToKeys[txnID] = append(idToKeys[txnID], roachpb.GCRequest_GCKey{Key: key})
		}
	})
	var wg sync.WaitGroup
	wg.Add(len(txns))
	// TODO(tschottdorf): a lot of these transactions will be on our local range,
	// so we should simply read those from a snapshot, and only push those which
	// are PENDING.
	for _, txn := range txns {
		// Check if the Txn is still alive. If this indicates that the Txn is
		// aborted and old enough to guarantee that any running coordinator
		// would have realized that the transaction wasn't running by means
		// of a heartbeat, then we're free to remove the sequence cache entry.
		// In the most likely case, there isn't even an entry (which will
		// be apparent by a zero timestamp and nil last heartbeat).
		go gcq.pushTxn(r, now, txn, roachpb.PUSH_TOUCH, &wg)
	}
	wg.Wait()
	var gcKeys []roachpb.GCRequest_GCKey
	for txnID, txn := range txns {
		if txn.Status == roachpb.PENDING {
			continue
		}
		ts := txn.Timestamp
		if txn.LastHeartbeat != nil {
			ts.Forward(*txn.LastHeartbeat)
		}
		if !cutoff.Less(ts) {
			// This is it, we can delete our sequence cache entries.
			gcKeys = append(gcKeys, idToKeys[txnID]...)
		}
	}
	return gcKeys
}
Example 6: shouldQueue
// shouldQueue determines whether a replica should be queued for GC,
// and if so, at what priority. To be considered for possible GC, a
// replica's leader lease must have expired more than
// ReplicaGCQueueInactivityThreshold ago. Further, the last replica GC
// check must have occurred more than ReplicaGCQueueInactivityThreshold
// in the past.
func (*replicaGCQueue) shouldQueue(now roachpb.Timestamp, rng *Replica, _ config.SystemConfig) (bool, float64) {
	lastCheck, err := rng.getLastReplicaGCTimestamp()
	if err != nil {
		log.Errorf("could not read last replica GC timestamp: %s", err)
		return false, 0
	}
	// Return false immediately if the previous check was less than the
	// check interval in the past.
	if now.Less(lastCheck.Add(ReplicaGCQueueInactivityThreshold.Nanoseconds(), 0)) {
		return false, 0
	}
	// Return whether or not lease activity occurred within the inactivity threshold.
	return rng.getLeaderLease().Expiration.Add(ReplicaGCQueueInactivityThreshold.Nanoseconds(), 0).Less(now), 0
}
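Both checks in shouldQueue use the same idiom: a timestamp plus a threshold is still Less than now exactly when that timestamp lies more than the threshold in the past. Below is a sketch of the idiom with the same hypothetical stand-in; the add helper is illustrative and simplified, not the real Timestamp.Add.

package main

import "fmt"

type timestamp struct {
	WallTime int64
	Logical  int32
}

func (t timestamp) less(s timestamp) bool {
	return t.WallTime < s.WallTime ||
		(t.WallTime == s.WallTime && t.Logical < s.Logical)
}

// add returns t shifted forward by wallTime nanoseconds and logical ticks.
func (t timestamp) add(wallTime int64, logical int32) timestamp {
	return timestamp{WallTime: t.WallTime + wallTime, Logical: t.Logical + logical}
}

// olderThan reports whether ts lies more than threshold nanoseconds before
// now, which is the shape of both checks in shouldQueue.
func olderThan(ts, now timestamp, threshold int64) bool {
	return ts.add(threshold, 0).less(now)
}

func main() {
	now := timestamp{WallTime: 10000}
	lastCheck := timestamp{WallTime: 4000}
	fmt.Println(olderThan(lastCheck, now, 5000)) // true: last check was 6000ns ago
	fmt.Println(olderThan(lastCheck, now, 7000)) // false: still within the interval
}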
Example 7: processSequenceCache
// processSequenceCache iterates through the local sequence cache entries,
// pushing the transactions (in cleanup mode) for those entries which appear
// to be old enough. In case the transaction indicates that it's terminated,
// the sequence cache keys are included in the result.
func processSequenceCache(r *Replica, now, cutoff roachpb.Timestamp, prevTxns map[string]*roachpb.Transaction) []roachpb.GCRequest_GCKey {
	snap := r.store.Engine().NewSnapshot()
	defer snap.Close()
	txns := make(map[string]*roachpb.Transaction)
	idToKeys := make(map[string][]roachpb.GCRequest_GCKey)
	r.sequence.Iterate(snap, func(key, id []byte, v roachpb.SequenceCacheEntry) {
		idStr := string(id)
		// If we've pushed this Txn previously, attempt cleanup (in case the
		// push was successful). Initiate new pushes only for newly discovered
		// "old" entries.
		if prevTxn, ok := prevTxns[idStr]; ok && prevTxn.Status != roachpb.PENDING {
			txns[idStr] = prevTxn
			idToKeys[idStr] = append(idToKeys[idStr], roachpb.GCRequest_GCKey{Key: key})
		} else if !cutoff.Less(v.Timestamp) {
			txns[idStr] = &roachpb.Transaction{ID: id, Key: v.Key, Status: roachpb.PENDING}
			idToKeys[idStr] = append(idToKeys[idStr], roachpb.GCRequest_GCKey{Key: key})
		}
	})
	var wg sync.WaitGroup
	wg.Add(len(txns))
	for _, txn := range txns {
		// Check if the Txn is still alive. If this indicates that the Txn is
		// aborted and old enough to guarantee that any running coordinator
		// would have realized that the transaction wasn't running by means
		// of a heartbeat, then we're free to remove the sequence cache entry.
		// In the most likely case, there isn't even an entry (which will
		// be apparent by a zero timestamp and nil last heartbeat).
		go pushTxn(r, now, txn, roachpb.CLEANUP_TXN, &wg)
	}
	wg.Wait()
	var gcKeys []roachpb.GCRequest_GCKey
	for idStr, txn := range txns {
		if txn.Status == roachpb.PENDING {
			continue
		}
		ts := txn.Timestamp
		if txn.LastHeartbeat != nil {
			ts.Forward(*txn.LastHeartbeat)
		}
		if !cutoff.Less(ts) {
			// This is it, we can delete our sequence cache entries.
			gcKeys = append(gcKeys, idToKeys[idStr]...)
		}
	}
	return gcKeys
}
Example 8: TestTxnCoordSenderHeartbeat
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)
	s := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(s.Sender)
	// Set heartbeat interval to 1ms for testing.
	s.Sender.heartbeatInterval = 1 * time.Millisecond
	initialTxn := newTxn(s.Clock, roachpb.Key("a"))
	put, h := createPutRequest(roachpb.Key("a"), []byte("value"), initialTxn)
	if reply, err := client.SendWrappedWith(s.Sender, nil, h, put); err != nil {
		t.Fatal(err)
	} else {
		*initialTxn = *reply.Header().Txn
	}
	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		if err := util.IsTrueWithin(func() bool {
			ok, txn, err := getTxn(s.Sender, initialTxn)
			if !ok || err != nil {
				return false
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			s.Sender.Lock()
			s.Manual.Increment(1)
			s.Sender.Unlock()
			if heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return true
			}
			return false
		}, 50*time.Millisecond); err != nil {
			t.Error("expected initial heartbeat within 50ms")
		}
	}
}
Example 9: ExampleNewClock
// ExampleNewClock shows how to create a new
// hybrid logical clock based on the local machine's
// physical clock. The sanity checks in this example
// will, of course, not fail and the output will be
// the age of the Unix epoch in nanoseconds.
func ExampleNewClock() {
	// Initialize a new clock, using the local
	// physical clock.
	c := NewClock(UnixNano)
	// Update the state of the hybrid clock.
	s := c.Now()
	time.Sleep(50 * time.Nanosecond)
	t := roachpb.Timestamp{WallTime: UnixNano()}
	// The sanity checks below will usually never be triggered.
	// s was taken before t, so s must compare as the earlier timestamp.
	if !s.Less(t) || t.Less(s) {
		log.Fatalf("The later timestamp is smaller than the earlier one")
	}
	// The HLC timestamp read earlier must not be ahead of the later
	// physical clock reading.
	if s.WallTime-t.WallTime > 0 {
		log.Fatalf("HLC timestamp %d deviates from physical clock %d", s, t)
	}
	if s.Logical > 0 {
		log.Fatalf("Trivial timestamp has logical component")
	}
	fmt.Printf("The Unix Epoch is now approximately %dns old.\n", t.WallTime)
}
Example 10: TestTxnCoordSenderHeartbeat
// TestTxnCoordSenderHeartbeat verifies periodic heartbeat of the
// transaction record.
func TestTxnCoordSenderHeartbeat(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sender := createTestDB(t)
	defer s.Stop()
	defer teardownHeartbeats(sender)
	// Set heartbeat interval to 1ms for testing.
	sender.heartbeatInterval = 1 * time.Millisecond
	initialTxn := client.NewTxn(context.Background(), *s.DB)
	if err := initialTxn.Put(roachpb.Key("a"), []byte("value")); err != nil {
		t.Fatal(err)
	}
	// Verify 3 heartbeats.
	var heartbeatTS roachpb.Timestamp
	for i := 0; i < 3; i++ {
		util.SucceedsSoon(t, func() error {
			txn, pErr := getTxn(sender, &initialTxn.Proto)
			if pErr != nil {
				t.Fatal(pErr)
			}
			// Advance clock by 1ns.
			// Locking the TxnCoordSender to prevent a data race.
			sender.Lock()
			s.Manual.Increment(1)
			sender.Unlock()
			if txn.LastHeartbeat != nil && heartbeatTS.Less(*txn.LastHeartbeat) {
				heartbeatTS = *txn.LastHeartbeat
				return nil
			}
			return util.Errorf("expected heartbeat")
		})
	}
	// Sneakily send an ABORT right to DistSender (bypassing TxnCoordSender).
	{
		var ba roachpb.BatchRequest
		ba.Add(&roachpb.EndTransactionRequest{
			Commit: false,
			Span:   roachpb.Span{Key: initialTxn.Proto.Key},
		})
		ba.Txn = &initialTxn.Proto
		if _, pErr := sender.wrapped.Send(context.Background(), ba); pErr != nil {
			t.Fatal(pErr)
		}
	}
	util.SucceedsSoon(t, func() error {
		sender.Lock()
		defer sender.Unlock()
		if txnMeta, ok := sender.txns[*initialTxn.Proto.ID]; !ok {
			t.Fatal("transaction unregistered prematurely")
		} else if txnMeta.txn.Status != roachpb.ABORTED {
			return fmt.Errorf("transaction is not aborted")
		}
		return nil
	})
	// Trying to do something else should give us a TransactionAbortedError.
	_, err := initialTxn.Get("a")
	assertTransactionAbortedError(t, err)
}
Example 11: Add
// Add the specified timestamp to the cache as covering the range of
// keys from start to end. If end is nil, the range covers the start
// key only. txnID is nil for no transaction. readTSCache specifies
// whether the command adding this timestamp should update the read
// timestamp cache; false to update the write timestamp cache.
func (tc *TimestampCache) Add(start, end roachpb.Key, timestamp roachpb.Timestamp, txnID *uuid.UUID, readTSCache bool) {
	// This gives us a memory-efficient end key if end is empty.
	if len(end) == 0 {
		end = start.Next()
		start = end[:len(start)]
	}
	if tc.latest.Less(timestamp) {
		tc.latest = timestamp
	}
	// Only add to the cache if the timestamp is more recent than the
	// low water mark.
	if tc.lowWater.Less(timestamp) {
		cache := tc.wCache
		if readTSCache {
			cache = tc.rCache
		}
		addRange := func(r interval.Range) {
			value := cacheValue{timestamp: timestamp, txnID: txnID}
			key := cache.MakeKey(r.Start, r.End)
			entry := makeCacheEntry(key, value)
			cache.AddEntry(entry)
		}
		r := interval.Range{
			Start: interval.Comparable(start),
			End:   interval.Comparable(end),
		}
		// Check existing, overlapping entries and truncate/split/remove if
		// superseded and in the past. If existing entries are in the future,
		// subtract from the range/ranges that need to be added to cache.
		for _, o := range cache.GetOverlaps(r.Start, r.End) {
			cv := o.Value.(*cacheValue)
			sCmp := r.Start.Compare(o.Key.Start)
			eCmp := r.End.Compare(o.Key.End)
			if !timestamp.Less(cv.timestamp) {
				// The existing interval has a timestamp less than or equal to the new interval.
				// Compare interval ranges to determine how to modify existing interval.
				switch {
				case sCmp == 0 && eCmp == 0:
					// New and old are equal; replace old with new and avoid the need to insert new.
					//
					// New: ------------
					// Old: ------------
					//
					// New: ------------
					*cv = cacheValue{timestamp: timestamp, txnID: txnID}
					cache.MoveToEnd(o.Entry)
					return
				case sCmp <= 0 && eCmp >= 0:
					// New contains or is equal to old; delete old.
					//
					// New: ------------      ------------      ------------
					// Old:   --------    or  ----------    or    ----------
					//
					// Old:
					cache.DelEntry(o.Entry)
				case sCmp > 0 && eCmp < 0:
					// Old contains new; split up old into two.
					//
					// New:     ----
					// Old: ------------
					//
					// Old: ----    ----
					oldEnd := o.Key.End
					o.Key.End = r.Start
					key := cache.MakeKey(r.End, oldEnd)
					entry := makeCacheEntry(key, *cv)
					cache.AddEntryAfter(entry, o.Entry)
				case eCmp >= 0:
					// Left partial overlap; truncate old end.
					//
					// New:     --------          --------
					// Old: --------      or  ------------
					//
					// Old: ----              ----
					o.Key.End = r.Start
				case sCmp <= 0:
					// Right partial overlap; truncate old start.
					//
					// New: --------            --------
					// Old:     --------    or  ------------
					//
					// Old:         ----            ----
					o.Key.Start = r.End
				default:
					panic(fmt.Sprintf("no overlap between %v and %v", o.Key.Range, r))
				}
			} else {
				// The existing interval has a timestamp greater than the new interval.
				// Compare interval ranges to determine how to modify new interval before
				// adding it to the timestamp cache.
				switch {
				case sCmp >= 0 && eCmp <= 0:
//......... the rest of this example's code is omitted here .........