This article collects typical usage examples of the Golang function EnableLogKey from github.com/couchbase/sync_gateway/base. If you are wondering what exactly EnableLogKey does and how to call it, the curated code examples below should help.
A total of 15 code examples of EnableLogKey are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help surface better Golang code examples.
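All 15 examples follow the same basic pattern: EnableLogKey is called once per log key at the top of a test, before the code under test runs, so that log output tagged with that key is emitted while the test executes. The short sketch below distills that pattern; the package name, test name, and test body are placeholders rather than code from any of the examples, and the remark about "+"-suffixed keys describes how the verbose variants are used in the examples below.
package db_test // placeholder package name

import (
    "testing"

    "github.com/couchbase/sync_gateway/base"
)

// TestWithVerboseChangesLogging is a hypothetical test illustrating the
// EnableLogKey pattern used in every example below: enable the log keys
// of interest up front, then exercise the code under test. Keys with a
// "+" suffix (e.g. "Changes+") appear in the examples as the more verbose
// variants of their base keys.
func TestWithVerboseChangesLogging(t *testing.T) {
    base.EnableLogKey("Changes")  // standard output for the Changes log key
    base.EnableLogKey("Changes+") // verbose output for the same key
    // ... set up the test database and exercise the changes feed here,
    // as the examples below do with the setupTestDB* helpers and db.GetChanges.
}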
Example 1: TestIndexChangesAdminBackfill
func TestIndexChangesAdminBackfill(t *testing.T) {
    db := setupTestDBForChangeIndex(t)
    defer tearDownTestDB(t, db)
    base.EnableLogKey("IndexChanges")
    base.EnableLogKey("Hash+")
    base.EnableLogKey("Changes+")
    base.EnableLogKey("Backfill")
    db.ChannelMapper = channels.NewDefaultChannelMapper()

    // Create a user with access to channel ABC
    authenticator := db.Authenticator()
    user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
    user.SetSequence(1)
    authenticator.Save(user)

    // Create docs on multiple channels:
    db.Put("both_1", Body{"channels": []string{"ABC", "PBS"}})
    db.Put("doc0000609", Body{"channels": []string{"PBS"}})
    db.Put("doc0000799", Body{"channels": []string{"ABC"}})
    time.Sleep(100 * time.Millisecond)

    // Check the _changes feed:
    db.user, _ = authenticator.GetUser("naomi")
    changes, err := db.GetChanges(base.SetOf("*"), getZeroSequence(db))
    assertNoError(t, err, "Couldn't GetChanges")
    printChanges(changes)
    assert.Equals(t, len(changes), 3)

    // Modify user to have access to both channels:
    log.Println("Get Principal")
    userInfo, err := db.GetPrincipal("naomi", true)
    assert.True(t, userInfo != nil)
    userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
    _, err = db.UpdatePrincipal(*userInfo, true, true)
    assertNoError(t, err, "UpdatePrincipal failed")
    time.Sleep(100 * time.Millisecond)

    // Write a few more docs (that should be returned as non-backfill)
    db.Put("doc_nobackfill_1", Body{"channels": []string{"PBS"}})
    db.Put("doc_nobackfill_2", Body{"channels": []string{"PBS"}})
    time.Sleep(100 * time.Millisecond)

    // Check the _changes feed:
    log.Println("Get User")
    db.user, _ = authenticator.GetUser("naomi")
    db.changeCache.waitForSequence(1)
    time.Sleep(100 * time.Millisecond)
    lastSeq := getLastSeq(changes)
    lastSeq, _ = db.ParseSequenceID(lastSeq.String())
    changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
    assertNoError(t, err, "Couldn't GetChanges")
    printChanges(changes)
    assert.Equals(t, len(changes), 5)
    verifyChange(t, changes, "both_1", true)
    verifyChange(t, changes, "doc0000609", true)
    verifyChange(t, changes, "doc_nobackfill_1", false)
    verifyChange(t, changes, "doc_nobackfill_2", false)
}
Example 2: TestChannelCacheBufferingWithUserDoc
// Test notification when buffered entries are processed after a user doc arrives.
func TestChannelCacheBufferingWithUserDoc(t *testing.T) {
    base.EnableLogKey("Cache")
    base.EnableLogKey("Cache+")
    base.EnableLogKey("Changes")
    base.EnableLogKey("Changes+")
    db := setupTestDBWithCacheOptions(t, CacheOptions{})
    defer tearDownTestDB(t, db)
    db.ChannelMapper = channels.NewDefaultChannelMapper()

    // Simulate seq 1 (user doc) being delayed - write 2 first
    WriteDirect(db, []string{"ABC"}, 2)

    // Start wait for doc in ABC
    waiter := db.tapListener.NewWaiterWithChannels(channels.SetOf("ABC"), nil)
    successChan := make(chan bool)
    go func() {
        waiter.Wait()
        close(successChan)
    }()

    // Simulate a user doc update
    WriteUserDirect(db, "bernard", 1)

    // Wait 3 seconds for notification, else fail the test.
    select {
    case <-successChan:
        log.Println("notification successful")
    case <-time.After(time.Second * 3):
        assertFailed(t, "No notification after 3 seconds")
    }
}
Example 3: TestChannelCacheBackfill
// Test backfill of late arriving sequences to the channel caches
func TestChannelCacheBackfill(t *testing.T) {
    base.EnableLogKey("Cache")
    base.EnableLogKey("Changes+")
    db := setupTestDBWithCacheOptions(t, shortWaitCache())
    defer tearDownTestDB(t, db)
    db.ChannelMapper = channels.NewDefaultChannelMapper()

    // Create a user with access to channel ABC
    authenticator := db.Authenticator()
    user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
    authenticator.Save(user)

    // Simulate seq 3 being delayed - write 1,2,4,5
    WriteDirect(db, []string{"ABC", "NBC"}, 1)
    WriteDirect(db, []string{"ABC"}, 2)
    WriteDirect(db, []string{"ABC", "PBS"}, 5)
    WriteDirect(db, []string{"ABC", "PBS"}, 6)

    // Test that retrieval isn't blocked by skipped sequences
    db.changeCache.waitForSequenceID(SequenceID{Seq: 6})
    db.user, _ = authenticator.GetUser("naomi")
    changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: SequenceID{Seq: 0}})
    assertNoError(t, err, "Couldn't GetChanges")
    assert.Equals(t, len(changes), 4)
    assert.DeepEquals(t, changes[0], &ChangeEntry{
        Seq:     SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
        ID:      "doc-1",
        Changes: []ChangeRev{{"rev": "1-a"}}})
    lastSeq := changes[len(changes)-1].Seq

    // Validate insert to various cache states
    WriteDirect(db, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
    WriteDirect(db, []string{"CBS"}, 7)
    db.changeCache.waitForSequenceID(SequenceID{Seq: 7})

    // verify insert at start (PBS)
    pbsCache := db.changeCache.getChannelCache("PBS")
    assert.True(t, verifyCacheSequences(pbsCache, []uint64{3, 5, 6}))

    // verify insert at middle (ABC)
    abcCache := db.changeCache.getChannelCache("ABC")
    assert.True(t, verifyCacheSequences(abcCache, []uint64{1, 2, 3, 5, 6}))

    // verify insert at end (NBC)
    nbcCache := db.changeCache.getChannelCache("NBC")
    assert.True(t, verifyCacheSequences(nbcCache, []uint64{1, 3}))

    // verify insert to empty cache (TBS)
    tbsCache := db.changeCache.getChannelCache("TBS")
    assert.True(t, verifyCacheSequences(tbsCache, []uint64{3}))

    // verify changes has three entries (needs to resend all since previous LowSeq, which
    // will be the late arriver (3) along with 5, 6)
    changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: lastSeq})
    assert.Equals(t, len(changes), 3)
    assert.DeepEquals(t, changes[0], &ChangeEntry{
        Seq:     SequenceID{Seq: 3, LowSeq: 3},
        ID:      "doc-3",
        Changes: []ChangeRev{{"rev": "1-a"}}})
}
Example 4: TestDenseBlockMultipleUpdates
func TestDenseBlockMultipleUpdates(t *testing.T) {
    base.EnableLogKey("ChannelStorage")
    base.EnableLogKey("ChannelStorage+")
    indexBucket := testIndexBucket()
    defer indexBucket.Close()
    block := NewDenseBlock("block1", nil)

    // Inserts
    entries := make([]*LogEntry, 10)
    for i := 0; i < 10; i++ {
        vbno := 10*i + 1
        sequence := i + 1
        entries[i] = makeBlockEntry(fmt.Sprintf("doc%d", i), "1-abc", vbno, sequence, IsNotRemoval, IsAdded)
    }
    overflow, pendingRemoval, updateClock, err := block.AddEntrySet(entries, indexBucket)
    assertNoError(t, err, "Error adding entry set")
    assert.Equals(t, len(overflow), 0)
    assert.Equals(t, len(pendingRemoval), 0)
    assert.Equals(t, block.getEntryCount(), uint16(10))
    foundEntries := block.GetAllEntries()
    assert.Equals(t, len(foundEntries), 10)
    for i := 0; i < 10; i++ {
        vbno := 10*i + 1
        sequence := i + 1
        assertLogEntry(t, foundEntries[i], fmt.Sprintf("doc%d", i), "1-abc", vbno, sequence)
        assert.Equals(t, updateClock.GetSequence(uint16(i*10+1)), uint64(i+1))
    }

    // Updates
    entries = make([]*LogEntry, 10)
    for i := 0; i < 10; i++ {
        vbno := 10*i + 1
        sequence := i + 21
        entries[i] = makeBlockEntry(fmt.Sprintf("doc%d", i), "2-abc", vbno, sequence, IsNotRemoval, IsNotAdded)
        entries[i].PrevSequence = uint64(i + 1)
    }
    overflow, pendingRemoval, updateClock, err = block.AddEntrySet(entries, indexBucket)
    assertNoError(t, err, "Error adding entry set")
    assert.Equals(t, len(overflow), 0)
    assert.Equals(t, len(pendingRemoval), 0)
    assert.Equals(t, int(block.getEntryCount()), 10)
    foundEntries = block.GetAllEntries()
    assert.Equals(t, len(foundEntries), 10)
    for i := 0; i < 10; i++ {
        assertLogEntry(t, foundEntries[i], fmt.Sprintf("doc%d", i), "2-abc", 10*i+1, 21+i)
        assert.Equals(t, updateClock.GetSequence(uint16(i*10+1)), uint64(i+21))
    }
}
Example 5: TestChangeIndexChanges
func TestChangeIndexChanges(t *testing.T) {
    base.EnableLogKey("DIndex+")
    db := setupTestDBForChangeIndex(t)
    defer tearDownTestDB(t, db)
    db.ChannelMapper = channels.NewDefaultChannelMapper()

    // Create a user with access to channel ABC
    authenticator := db.Authenticator()
    user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
    authenticator.Save(user)

    // Write an entry to the bucket
    WriteDirectWithKey(db, "1c856b5724dcf4273c3993619900ce7f", []string{}, 1)
    time.Sleep(20 * time.Millisecond)
    changes, err := db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
    assert.True(t, err == nil)
    assert.Equals(t, len(changes), 1)
    time.Sleep(20 * time.Millisecond)

    // Write a few more entries to the bucket
    WriteDirectWithKey(db, "12389b182ababd12fff662848edeb908", []string{}, 1)
    time.Sleep(20 * time.Millisecond)
    changes, err = db.GetChanges(base.SetOf("*"), ChangesOptions{Since: simpleClockSequence(0)})
    assert.True(t, err == nil)
    assert.Equals(t, len(changes), 2)
}
Example 6: TestChangeIndexAddSet
func TestChangeIndexAddSet(t *testing.T) {
    base.EnableLogKey("DIndex+")
    changeIndex, bucket := testKvChangeIndex("indexBucket")
    defer changeIndex.Stop()
    entries := make([]*LogEntry, 1000)
    for vb := 0; vb < 1000; vb++ {
        entries[vb] = channelEntry(uint16(vb), 1, fmt.Sprintf("foo%d", vb), "1-a", []string{"ABC"})
    }
    indexPartitions := testPartitionMap()
    channelStorage := NewChannelStorage(bucket, "", indexPartitions)
    changeIndex.writer.indexEntries(entries, indexPartitions.VbMap, channelStorage)

    // wait for add to complete
    time.Sleep(50 * time.Millisecond)

    // Verify channel clocks
    channelClock := base.SequenceClockImpl{}
    chanClockBytes, _, err := bucket.GetRaw(getChannelClockKey("ABC"))
    err = channelClock.Unmarshal(chanClockBytes)
    assertNoError(t, err, "Unmarshal channel clock sequence")
    starChannelClock := base.SequenceClockImpl{}
    chanClockBytes, _, err = bucket.GetRaw(getChannelClockKey("*"))
    err = starChannelClock.Unmarshal(chanClockBytes)
    for vb := uint16(0); vb < 1000; vb++ {
        assert.Equals(t, channelClock.GetSequence(vb), uint64(1))
        assert.Equals(t, starChannelClock.GetSequence(vb), uint64(1))
    }
}
Example 7: TestLateSequenceAsFirst
func TestLateSequenceAsFirst(t *testing.T) {
    base.EnableLogKey("Cache")
    context := testBucketContext()
    cache := newChannelCache(context, "Test1", 0)

    // Add some entries to cache
    cache.addToCache(e(5, "doc1", "1-a"), false)
    cache.addToCache(e(10, "doc2", "2-a"), false)
    cache.addToCache(e(15, "doc3", "3-a"), false)
    entries, err := cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
    assert.Equals(t, len(entries), 3)
    assert.True(t, verifyChannelSequences(entries, []uint64{5, 10, 15}))
    assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc2", "doc3"}))
    assert.True(t, err == nil)

    // Add a late-arriving sequence
    cache.AddLateSequence(e(3, "doc0", "0-a"))
    cache.addToCache(e(3, "doc0", "0-a"), false)
    entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
    assert.Equals(t, len(entries), 4)
    writeEntries(entries)
    assert.True(t, verifyChannelSequences(entries, []uint64{3, 5, 10, 15}))
    assert.True(t, verifyChannelDocIDs(entries, []string{"doc0", "doc1", "doc2", "doc3"}))
    assert.True(t, err == nil)
}
Example 8: TestChangeIndexAddEntry
func TestChangeIndexAddEntry(t *testing.T) {
    base.EnableLogKey("DIndex+")
    changeIndex, bucket := testKvChangeIndex("indexBucket")
    defer changeIndex.Stop()
    changeIndex.writer.addToCache(channelEntry(1, 1, "foo1", "1-a", []string{"ABC", "CBS"}))

    // wait for add
    time.Sleep(50 * time.Millisecond)

    // Verify entry
    var entry LogEntry
    entryBytes, _, err := bucket.GetRaw("_idx_entry:1:1")
    assert.True(t, err == nil)
    json.Unmarshal(entryBytes, &entry)
    assert.Equals(t, entry.DocID, "foo1")
    assert.Equals(t, entry.Sequence, uint64(1))
    assert.Equals(t, entry.RevID, "1-a")

    // Verify Channel Index Block
    partitions, err := changeIndex.getIndexPartitions()
    assertNoError(t, err, "Get index partitions")
    block := NewIndexBlock("ABC", 1, 1, partitions)
    blockBytes, _, err := bucket.GetRaw(getIndexBlockKey("ABC", 0, 0))
    bucket.Dump()
    err = block.Unmarshal(blockBytes)
    assertNoError(t, err, "Unmarshal block")
    allEntries := block.GetAllEntries()
    assert.Equals(t, len(allEntries), 1)

    // Verify stable sequence
    stableClock, err := changeIndex.GetStableClock(false)
    assertNoError(t, err, "Get stable clock")
    assert.Equals(t, stableClock.GetSequence(1), uint64(1))
    assert.Equals(t, stableClock.GetSequence(2), uint64(0))

    // Verify channel sequences
    channelClock := base.SequenceClockImpl{}
    chanClockBytes, _, err := bucket.GetRaw(getChannelClockKey("ABC"))
    log.Println("key:", getChannelClockKey("ABC"))
    log.Println("bytes:", chanClockBytes)
    err = channelClock.Unmarshal(chanClockBytes)
    log.Println("chan ABC", channelClock.GetSequence(1))
    assertNoError(t, err, "Unmarshal channel clock sequence")
    assert.Equals(t, channelClock.GetSequence(1), uint64(1))
    assert.Equals(t, channelClock.GetSequence(2), uint64(0))
    channelClock = base.SequenceClockImpl{}
    chanClockBytes, _, err = bucket.GetRaw(getChannelClockKey("CBS"))
    err = channelClock.Unmarshal(chanClockBytes)
    assertNoError(t, err, "Unmarshal channel clock sequence")
    assert.Equals(t, channelClock.GetSequence(1), uint64(1))
    assert.Equals(t, channelClock.GetSequence(2), uint64(0))
}
Example 9: BenchmarkChannelCacheRepeatedDocs95
func BenchmarkChannelCacheRepeatedDocs95(b *testing.B) {
    base.EnableLogKey("CacheTest")
    //base.SetLogLevel(2) // disables logging
    context := testBucketContext()
    cache := newChannelCache(context, "Benchmark", 0)

    // generate doc IDs
    docIDs, revStrings := generateDocs(95.0, b.N)
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        cache.addToCache(e(uint64(i), docIDs[i], revStrings[i]), false)
    }
}
Example 10: RaceTestPollResultLongRunningContinuous
// Currently disabled, due to test race conditions between the continuous changes start (in its own goroutine),
// and the send of the continuous terminator. We can't ensure that the changes request has been
// started before all other test operations have been sent (so that we never break out of the changes loop)
func RaceTestPollResultLongRunningContinuous(t *testing.T) {
    // Reset the index expvars
    indexExpvars.Init()
    base.EnableLogKey("IndexPoll")
    db := setupTestDBForChangeIndex(t)
    defer tearDownTestDB(t, db)
    db.ChannelMapper = channels.NewDefaultChannelMapper()
    WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
    time.Sleep(100 * time.Millisecond)

    // Do a basic changes to trigger start of polling for channel
    changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
    assertTrue(t, err == nil, "Error getting changes")
    assert.Equals(t, len(changes), 1)
    log.Printf("Changes:%+v", changes[0])

    // Start a continuous changes on channel (ABC). Waitgroup keeps test open until continuous is terminated
    var wg sync.WaitGroup
    continuousTerminator := make(chan bool)
    wg.Add(1)
    go func() {
        defer wg.Done()
        since, err := db.ParseSequenceID("2-0")
        abcChanges, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: since, Wait: true, Continuous: true, Terminator: continuousTerminator})
        assertTrue(t, err == nil, "Error getting changes")
        log.Printf("Got %d changes", len(abcChanges))
        log.Println("Continuous completed")
    }()

    for i := 0; i < 10000; i++ {
        WriteDirectWithKey(db, fmt.Sprintf("docABC_%d", i), []string{"ABC"}, 3)
        time.Sleep(1 * time.Millisecond)
    }
    time.Sleep(1000 * time.Millisecond) // wait for indexing, polling, and changes processing
    close(continuousTerminator)
    log.Println("closed terminator")
    time.Sleep(100 * time.Millisecond)
    WriteDirectWithKey(db, "terminatorCheck", []string{"ABC"}, 1)
    wg.Wait()
}
Example 11: TestDenseBlockList
// --------------------
// DenseBlockList Tests
// --------------------
func TestDenseBlockList(t *testing.T) {
    base.EnableLogKey("ChannelStorage+")
    indexBucket := testIndexBucket()
    defer indexBucket.Close()

    // Initialize a new block list. Will initialize with first block
    list := NewDenseBlockList("ABC", 1, indexBucket)

    // Simple insert
    partitionClock := makePartitionClock(
        []uint16{1, 3, 6, 11},
        []uint64{0, 0, 0, 0},
    )
    _, err := list.AddBlock()
    assertNoError(t, err, "Error adding block to blocklist")
    indexBucket.Dump()

    // Create a new instance of the same block list, validate contents
    newList := NewDenseBlockList("ABC", 1, indexBucket)
    assert.Equals(t, len(newList.blocks), 2)
    assert.Equals(t, newList.blocks[0].BlockIndex, 0)

    // Add a few more blocks to the new list
    partitionClock.incrementPartitionClock(1)
    _, err = newList.AddBlock()
    assertNoError(t, err, "Error adding block2 to blocklist")
    assert.Equals(t, len(newList.blocks), 3)
    assert.Equals(t, newList.blocks[0].BlockIndex, 0)
    assert.Equals(t, newList.blocks[1].BlockIndex, 1)

    // Attempt to add a block via original list. Should be cancelled due to cas
    // mismatch, and reload the current state (i.e. newList)
    partitionClock.incrementPartitionClock(1)
    list.AddBlock()
    assert.Equals(t, len(list.blocks), 3)
    assert.Equals(t, newList.blocks[0].BlockIndex, 0)
    assert.Equals(t, newList.blocks[1].BlockIndex, 1)
}
Example 12: TestDuplicateDocID
func TestDuplicateDocID(t *testing.T) {
    base.EnableLogKey("Cache")
    context := testBucketContext()
    cache := newChannelCache(context, "Test1", 0)

    // Add some entries to cache
    cache.addToCache(e(1, "doc1", "1-a"), false)
    cache.addToCache(e(2, "doc3", "3-a"), false)
    cache.addToCache(e(3, "doc5", "5-a"), false)
    entries, err := cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
    assert.Equals(t, len(entries), 3)
    assert.True(t, verifyChannelSequences(entries, []uint64{1, 2, 3}))
    assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc3", "doc5"}))
    assert.True(t, err == nil)

    // Add a new revision matching mid-list
    cache.addToCache(e(4, "doc3", "3-b"), false)
    entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
    assert.Equals(t, len(entries), 3)
    assert.True(t, verifyChannelSequences(entries, []uint64{1, 3, 4}))
    assert.True(t, verifyChannelDocIDs(entries, []string{"doc1", "doc5", "doc3"}))
    assert.True(t, err == nil)

    // Add a new revision matching first
    cache.addToCache(e(5, "doc1", "1-b"), false)
    entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
    assert.Equals(t, len(entries), 3)
    assert.True(t, verifyChannelSequences(entries, []uint64{3, 4, 5}))
    assert.True(t, verifyChannelDocIDs(entries, []string{"doc5", "doc3", "doc1"}))
    assert.True(t, err == nil)

    // Add a new revision matching last
    cache.addToCache(e(6, "doc1", "1-c"), false)
    entries, err = cache.GetChanges(ChangesOptions{Since: SequenceID{Seq: 0}})
    assert.Equals(t, len(entries), 3)
    assert.True(t, verifyChannelSequences(entries, []uint64{3, 4, 6}))
    assert.True(t, verifyChannelDocIDs(entries, []string{"doc5", "doc3", "doc1"}))
    assert.True(t, err == nil)
}
Example 13: TestPollResultReuseLongpoll
func TestPollResultReuseLongpoll(t *testing.T) {
    // Reset the index expvars
    indexExpvars.Init()
    base.EnableLogKey("IndexPoll")
    db := setupTestDBForChangeIndex(t)
    defer tearDownTestDB(t, db)
    db.ChannelMapper = channels.NewDefaultChannelMapper()
    WriteDirectWithKey(db, "docABC_1", []string{"ABC"}, 1)
    time.Sleep(100 * time.Millisecond)

    // Do a basic changes to trigger start of polling for channel
    changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: simpleClockSequence(0)})
    assertTrue(t, err == nil, "Error getting changes")
    assert.Equals(t, len(changes), 1)
    log.Printf("Changes:%+v", changes[0])

    // Start a longpoll changes, use waitgroup to delay the test until it returns.
    var wg sync.WaitGroup
    wg.Add(1)
    go func() {
        defer wg.Done()
        since, err := db.ParseSequenceID("2-0")
        assertTrue(t, err == nil, "Error parsing sequence ID")
        abcHboChanges, err := db.GetChanges(base.SetOf("ABC", "HBO"), ChangesOptions{Since: since, Wait: true})
        assertTrue(t, err == nil, "Error getting changes")
        // Expects two changes - the nil that's sent on initial wait, and then docABC_2
        assert.Equals(t, len(abcHboChanges), 2)
    }()

    time.Sleep(100 * time.Millisecond)

    // Write an entry to channel ABC to notify the waiting longpoll
    WriteDirectWithKey(db, "docABC_2", []string{"ABC"}, 2)
    wg.Wait()

    // Use expvars to confirm poll hits/misses (can't tell from changes response whether it used poll results,
    // or reloaded from index). Expect one poll hit (the longpoll request), and one miss (the basic changes request)
    assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_hit"), "1")
    assert.Equals(t, getExpvarAsString(indexExpvars, "getChanges_lastPolled_miss"), "1")
}
Example 14: TestCalculateChangedPartitions
func TestCalculateChangedPartitions(t *testing.T) {
    base.EnableLogKey("ChannelStorage+")
    indexBucket := testIndexBucket()
    defer indexBucket.Close()
    reader := NewDenseStorageReader(indexBucket, "ABC", testPartitionMap())
    startClock := getClockForMap(map[uint16]uint64{
        0:   0,
        100: 0,
        200: 0,
    })
    endClock := getClockForMap(map[uint16]uint64{
        0:   5,
        100: 10,
        200: 15,
    })
    changedVbs, changedPartitions := reader.calculateChanged(startClock, endClock)
    assert.Equals(t, len(changedVbs), 3)
    assert.Equals(t, changedVbs[0], uint16(0))   // Partition 0
    assert.Equals(t, changedVbs[1], uint16(100)) // Partition 6
    assert.Equals(t, changedVbs[2], uint16(200)) // Partition 12

    changedPartitionCount := 0
    for partition, partitionRange := range changedPartitions {
        if partitionRange != nil {
            changedPartitionCount++
            assertTrue(t, partition == 0 || partition == 6 || partition == 12, "Unexpected changed partition")
        }
    }
    assert.Equals(t, changedPartitions[0].Since.GetSequence(0), uint64(0))
    assert.Equals(t, changedPartitions[6].Since.GetSequence(100), uint64(0))
    assert.Equals(t, changedPartitions[12].Since.GetSequence(200), uint64(0))
    assert.Equals(t, changedPartitions[0].To.GetSequence(0), uint64(5))
    assert.Equals(t, changedPartitions[6].To.GetSequence(100), uint64(10))
    assert.Equals(t, changedPartitions[12].To.GetSequence(200), uint64(15))
    assert.Equals(t, changedPartitionCount, 3)
}
Example 15: TestChannelCacheSize
// Test size config
func TestChannelCacheSize(t *testing.T) {
    base.EnableLogKey("Cache")
    channelOptions := ChannelCacheOptions{
        ChannelCacheMinLength: 600,
        ChannelCacheMaxLength: 600,
    }
    options := CacheOptions{
        ChannelCacheOptions: channelOptions,
    }
    log.Printf("Options in test:%+v", options)
    db := setupTestDBWithCacheOptions(t, options)
    defer tearDownTestDB(t, db)
    db.ChannelMapper = channels.NewDefaultChannelMapper()

    // Create a user with access to channel ABC
    authenticator := db.Authenticator()
    user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
    authenticator.Save(user)

    // Write 750 docs to channel ABC
    for i := 1; i <= 750; i++ {
        WriteDirect(db, []string{"ABC"}, uint64(i))
    }

    // Validate that retrieval returns expected sequences
    db.changeCache.waitForSequence(750)
    db.user, _ = authenticator.GetUser("naomi")
    changes, err := db.GetChanges(base.SetOf("ABC"), ChangesOptions{Since: SequenceID{Seq: 0}})
    assertNoError(t, err, "Couldn't GetChanges")
    assert.Equals(t, len(changes), 750)

    // Validate that cache stores the expected number of values
    changeCache, ok := db.changeCache.(*changeCache)
    assertTrue(t, ok, "Testing skipped sequences without a change cache")
    abcCache := changeCache.channelCaches["ABC"]
    assert.Equals(t, len(abcCache.logs), 600)
}