This article compiles typical usage examples of the Golang function github.com/couchbase/sync_gateway/base.UpdateLogKeys. If you are unsure what UpdateLogKeys does or how to call it, the examples below should help.
A total of 13 code examples of the UpdateLogKeys function are shown, ordered by popularity by default.
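Before the individual examples, here is the shared pattern in a minimal, hedged sketch: build a map from log key names to booleans and pass it to base.UpdateLogKeys. Based on how handleSetLogging (Example 4) uses the second argument (true for PUT, false for POST), it appears to mean "replace the existing key set" rather than "merge into it"; treat that reading, and the helper name below, as assumptions for illustration.
package db

import "github.com/couchbase/sync_gateway/base"

// enableShadowLogging is a hypothetical helper showing the usage pattern
// common to the examples below; the key strings ("Shadow", "Shadow+") are
// the ones the shadower tests enable.
func enableShadowLogging() {
    logKeys := map[string]bool{
        "Shadow":  true,
        "Shadow+": true,
    }
    // true follows the tests' convention; per handleSetLogging it appears to
    // replace the current log key set with the supplied keys.
    base.UpdateLogKeys(logKeys, true)
}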
Example 1: TestShadowerPushEchoCancellation
// Make sure a rev inserted into the db by a client replicator doesn't get echoed from the
// shadower as a different revision.
func TestShadowerPushEchoCancellation(t *testing.T) {
var logKeys = map[string]bool{
"Shadow": true,
"Shadow+": true,
}
base.UpdateLogKeys(logKeys, true)
bucket := makeExternalBucket()
defer bucket.Close()
db := setupTestDB(t)
defer tearDownTestDB(t, db)
var err error
db.Shadower, err = NewShadower(db.DatabaseContext, bucket, nil)
assertNoError(t, err, "NewShadower")
// Push an existing doc revision (the way a client's push replicator would)
db.PutExistingRev("foo", Body{"a": "b"}, []string{"1-madeup"})
waitFor(t, func() bool {
return atomic.LoadUint64(&db.Shadower.pullCount) >= 1
})
// Make sure the echoed pull didn't create a new revision:
doc, _ := db.GetDoc("foo")
assert.Equals(t, len(doc.History), 1)
}
Example 2: TestShadowerPullRevisionWithMissingParentRev
// Ensure that a new rev pushed from a shadow bucket update, where the UpstreamRev does not exist as a parent in
// the document's rev tree, does not panic; it should generate a new conflicting branch instead.
// see #1603
func TestShadowerPullRevisionWithMissingParentRev(t *testing.T) {
var logKeys = map[string]bool{
"Shadow": true,
"Shadow+": true,
}
base.UpdateLogKeys(logKeys, true)
bucket := makeExternalBucket()
defer bucket.Close()
db := setupTestDB(t)
defer tearDownTestDB(t, db)
var err error
db.Shadower, err = NewShadower(db.DatabaseContext, bucket, nil)
assertNoError(t, err, "NewShadower")
// Push an existing doc revision (the way a client's push replicator would)
db.PutExistingRev("foo", Body{"a": "b"}, []string{"1-madeup"})
waitFor(t, func() bool {
return atomic.LoadUint64(&db.Shadower.pullCount) >= 1
})
//Directly edit the "upstream_rev" _sync property of the doc
//We don't want to trigger a push to the shadow bucket
raw, _, _ := db.Bucket.GetRaw("foo")
//Unmarshal to JSON
var docObj map[string]interface{}
json.Unmarshal(raw, &docObj)
docObj["upstream_rev"] = "1-notexist"
docBytes, _ := json.Marshal(docObj)
//Write raw doc bytes back to bucket
db.Bucket.SetRaw("foo", 0, docBytes)
//Now edit the raw file in the shadow bucket to
// trigger a shadow pull
bucket.SetRaw("foo", 0, []byte("{\"a\":\"c\"}"))
//validate that upstream_rev was changed in DB
raw, _, _ = db.Bucket.GetRaw("foo")
json.Unmarshal(raw, &docObj)
assert.Equals(t, docObj["upstream_rev"], "1-notexist")
waitFor(t, func() bool {
return atomic.LoadUint64(&db.Shadower.pullCount) >= 2
})
//Assert that we can get the two conflicting revisions
gotBody, err := db.GetRev("foo", "1-madeup", false, nil)
assert.DeepEquals(t, gotBody, Body{"_id": "foo", "a": "b", "_rev": "1-madeup"})
gotBody, err = db.GetRev("foo", "2-edce85747420ad6781bdfccdebf82180", false, nil)
assert.DeepEquals(t, gotBody, Body{"_id": "foo", "a": "c", "_rev": "2-edce85747420ad6781bdfccdebf82180"})
}
Example 3: TestSlowExecutionProcessing
// Test sending many events with slow-running execution to validate they get dropped after hitting
// the max concurrent goroutines
func TestSlowExecutionProcessing(t *testing.T) {
em := NewEventManager()
em.Start(0, -1)
var logKeys = map[string]bool{
"Events": true,
}
base.UpdateLogKeys(logKeys, true)
ids := make([]string, 20)
for i := 0; i < 20; i++ {
ids[i] = fmt.Sprintf("%d", i)
}
eventForTest := func(i int) (Body, base.Set) {
testBody := Body{
"_id": ids[i],
"value": i,
}
var channelSet base.Set
if i%2 == 0 {
channelSet = base.SetFromArray([]string{"Even"})
} else {
channelSet = base.SetFromArray([]string{"Odd"})
}
return testBody, channelSet
}
resultChannel := make(chan Body, 100)
testHandler := &TestingHandler{HandledEvent: DocumentChange, handleDelay: 500}
testHandler.SetChannel(resultChannel)
em.RegisterEventHandler(testHandler, DocumentChange)
for i := 0; i < 20; i++ {
body, channels := eventForTest(i % 10)
em.RaiseDocumentChangeEvent(body, "", channels)
}
// wait for Event Manager queue worker to process
time.Sleep(2 * time.Second)
fmt.Println("resultChannel:", len(resultChannel))
assert.True(t, len(resultChannel) == 20)
}
Example 4: handleSetLogging
func (h *handler) handleSetLogging() error {
body, err := h.readBody()
if err != nil {
return nil
}
if h.getQuery("level") != "" {
base.SetLogLevel(int(getRestrictedIntQuery(h.rq.URL.Query(), "level", uint64(base.LogLevel()), 1, 3, false)))
if len(body) == 0 {
return nil // empty body is OK if request is just setting the log level
}
}
var keys map[string]bool
if err := json.Unmarshal(body, &keys); err != nil {
return base.HTTPErrorf(http.StatusBadRequest, "Invalid JSON or non-boolean values")
}
base.UpdateLogKeys(keys, h.rq.Method == "PUT")
return nil
}
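As a usage note for Example 4: the handler only shows how the request body and the "level" query parameter are interpreted; the route it is mounted on is not part of the snippet. The sketch below assumes the Sync Gateway admin endpoint is /_logging on port 4985 (an assumption, not confirmed by the code above), while the JSON body shape and the PUT-replaces / POST-merges distinction do follow from the handler.
package main

import (
    "bytes"
    "net/http"
)

func main() {
    // Assumed admin URL; only the body format and the PUT/POST semantics are
    // taken from handleSetLogging above.
    body := bytes.NewBufferString(`{"Shadow": true, "Changes+": false}`)
    req, _ := http.NewRequest("PUT", "http://localhost:4985/_logging?level=2", body)
    req.Header.Set("Content-Type", "application/json")
    if resp, err := http.DefaultClient.Do(req); err == nil {
        resp.Body.Close()
    }
}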
Example 5: TestShadowerPush
func TestShadowerPush(t *testing.T) {
var logKeys = map[string]bool{
"Shadow": true,
}
base.UpdateLogKeys(logKeys, true)
bucket := makeExternalBucket()
defer bucket.Close()
db := setupTestDB(t)
defer tearDownTestDB(t, db)
var err error
db.Shadower, err = NewShadower(db.DatabaseContext, bucket, nil)
assertNoError(t, err, "NewShadower")
key1rev1, err := db.Put("key1", Body{"aaa": "bbb"})
assertNoError(t, err, "Put")
_, err = db.Put("key2", Body{"ccc": "ddd"})
assertNoError(t, err, "Put")
base.Log("Waiting for shadower to catch up...")
var doc1, doc2 Body
waitFor(t, func() bool {
_, err1 := bucket.Get("key1", &doc1)
_, err2 := bucket.Get("key2", &doc2)
return err1 == nil && err2 == nil
})
assert.DeepEquals(t, doc1, Body{"aaa": "bbb"})
assert.DeepEquals(t, doc2, Body{"ccc": "ddd"})
base.Log("Deleting local doc")
db.DeleteDoc("key1", key1rev1)
waitFor(t, func() bool {
_, err = bucket.Get("key1", &doc1)
return err != nil
})
assert.True(t, base.IsDocNotFoundError(err))
}
Example 6: TestSkippedViewRetrieval
// Test retrieval of skipped sequence using view. Unit test catches panic, but we don't currently have a way
// to simulate an entry that makes it to the bucket (and so is accessible to the view), but doesn't show up on the TAP feed.
// Cache logging in this test validates that view retrieval is working because of TAP latency (item is in the bucket, but hasn't
// been seen on the TAP feed yet). Longer term could consider enhancing leaky bucket to 'miss' the entry on the tap feed.
func TestSkippedViewRetrieval(t *testing.T) {
var logKeys = map[string]bool{
"Cache": true,
"Cache+": true,
}
base.UpdateLogKeys(logKeys, true)
// Use leaky bucket to have the tap feed 'lose' document 3
leakyConfig := base.LeakyBucketConfig{
TapFeedMissingDocs: []string{"doc-3"},
}
db := setupTestLeakyDBWithCacheOptions(t, shortWaitCache(), leakyConfig)
defer tearDownTestDB(t, db)
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Allow db to initialize and run initial CleanSkippedSequenceQueue
time.Sleep(10 * time.Millisecond)
// Write sequences direct
WriteDirect(db, []string{"ABC"}, 1)
WriteDirect(db, []string{"ABC"}, 2)
WriteDirect(db, []string{"ABC"}, 3)
changeCache, ok := db.changeCache.(*changeCache)
assertTrue(t, ok, "Testing skipped sequences without a change cache")
// Artificially add 3 skipped, and back date skipped entry by 2 hours to trigger attempted view retrieval during Clean call
changeCache.skippedSeqs.Push(&SkippedSequence{3, time.Now().Add(time.Duration(time.Hour * -2))})
changeCache.skippedSeqs.Push(&SkippedSequence{5, time.Now().Add(time.Duration(time.Hour * -2))})
changeCache.CleanSkippedSequenceQueue()
// Validate that 3 is in the channel cache, 5 isn't
entries, err := db.changeCache.GetChanges("ABC", ChangesOptions{Since: SequenceID{Seq: 2}})
assertNoError(t, err, "Get Changes returned error")
assertTrue(t, len(entries) == 1, "Incorrect number of entries returned")
assert.Equals(t, entries[0].DocID, "doc-3")
}
Example 7: TestUpdatePrincipal
// Unit test for bug #673
func TestUpdatePrincipal(t *testing.T) {
var logKeys = map[string]bool{
"Cache": true,
"Changes": true,
"Changes+": true,
}
base.UpdateLogKeys(logKeys, true)
db := setupTestDB(t)
defer tearDownTestDB(t, db)
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Create a user with access to channel ABC
authenticator := db.Authenticator()
user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
authenticator.Save(user)
// Validate that a call to UpdatePrincipals with no changes to the user doesn't allocate a sequence
userInfo, err := db.GetPrincipal("naomi", true)
userInfo.ExplicitChannels = base.SetOf("ABC")
_, err = db.UpdatePrincipal(*userInfo, true, true)
assertNoError(t, err, "Unable to update principal")
nextSeq, err := db.sequences.nextSequence()
assert.Equals(t, nextSeq, uint64(1))
// Validate that a call to UpdatePrincipals with changes to the user does allocate a sequence
userInfo, err = db.GetPrincipal("naomi", true)
userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
_, err = db.UpdatePrincipal(*userInfo, true, true)
assertNoError(t, err, "Unable to update principal")
nextSeq, err = db.sequences.nextSequence()
assert.Equals(t, nextSeq, uint64(3))
}
Example 8: FailingTestChannelRace
// Test currently fails intermittently on concurrent access to var changes. Disabling for now - should be refactored.
func FailingTestChannelRace(t *testing.T) {
var logKeys = map[string]bool{
"Sequences": true,
}
base.UpdateLogKeys(logKeys, true)
db := setupTestDBWithCacheOptions(t, shortWaitCache())
defer tearDownTestDB(t, db)
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Create a user with access to channels "Odd", "Even"
authenticator := db.Authenticator()
user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("Even", "Odd"))
authenticator.Save(user)
// Write initial sequences
WriteDirect(db, []string{"Odd"}, 1)
WriteDirect(db, []string{"Even"}, 2)
WriteDirect(db, []string{"Odd"}, 3)
db.changeCache.waitForSequence(3)
db.user, _ = authenticator.GetUser("naomi")
// Start changes feed
var options ChangesOptions
options.Since = SequenceID{Seq: 0}
options.Terminator = make(chan bool)
options.Continuous = true
options.Wait = true
feed, err := db.MultiChangesFeed(base.SetOf("Even", "Odd"), options)
assert.True(t, err == nil)
feedClosed := false
// Go-routine to work the feed channel and write to an array for use by assertions
var changes = make([]*ChangeEntry, 0, 50)
go func() {
for feedClosed == false {
select {
case entry, ok := <-feed:
if ok {
// feed sends nil after each continuous iteration
if entry != nil {
log.Println("Changes entry:", entry.Seq)
changes = append(changes, entry)
}
} else {
log.Println("Closing feed")
feedClosed = true
}
}
}
}()
// Wait for processing of two channels (100 ms each)
time.Sleep(250 * time.Millisecond)
// Validate the initial sequences arrive as expected
assert.Equals(t, len(changes), 3)
// Send update to trigger the start of the next changes iteration
WriteDirect(db, []string{"Even"}, 4)
time.Sleep(150 * time.Millisecond)
// After read of "Even" channel, but before read of "Odd" channel, send three new entries
WriteDirect(db, []string{"Odd"}, 5)
WriteDirect(db, []string{"Even"}, 6)
WriteDirect(db, []string{"Odd"}, 7)
time.Sleep(100 * time.Millisecond)
// At this point we haven't sent sequence 6, but the continuous changes feed has since=7
// Write a few more to validate that we're not catching up on the missing '6' later
WriteDirect(db, []string{"Even"}, 8)
WriteDirect(db, []string{"Odd"}, 9)
time.Sleep(750 * time.Millisecond)
assert.Equals(t, len(changes), 9)
assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "3", "4", "5", "6", "7", "8", "9"}))
changesString := ""
for _, change := range changes {
changesString = fmt.Sprintf("%s%d, ", changesString, change.Seq.Seq)
}
fmt.Println("changes: ", changesString)
close(options.Terminator)
}
Example 9: TestLowSequenceHandlingWithAccessGrant
// Test low sequence handling of late arriving sequences to a continuous changes feed, when the
// user gets added to a new channel with existing entries (and existing backfill)
func TestLowSequenceHandlingWithAccessGrant(t *testing.T) {
var logKeys = map[string]bool{
"Sequence": true,
}
base.UpdateLogKeys(logKeys, true)
db := setupTestDBWithCacheOptions(t, shortWaitCache())
defer tearDownTestDB(t, db)
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Create a user with access to channel ABC
authenticator := db.Authenticator()
user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC"))
authenticator.Save(user)
// Simulate seq 3 and 4 being delayed - write 1,2,5,6
WriteDirect(db, []string{"ABC"}, 1)
WriteDirect(db, []string{"ABC"}, 2)
WriteDirect(db, []string{"PBS"}, 5)
WriteDirect(db, []string{"ABC", "PBS"}, 6)
db.changeCache.waitForSequence(6)
db.user, _ = authenticator.GetUser("naomi")
// Start changes feed
var options ChangesOptions
options.Since = SequenceID{Seq: 0}
options.Terminator = make(chan bool)
options.Continuous = true
options.Wait = true
feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
assert.True(t, err == nil)
// Go-routine to work the feed channel and write to an array for use by assertions
var changes = make([]*ChangeEntry, 0, 50)
time.Sleep(50 * time.Millisecond)
// Validate the initial sequences arrive as expected
err = appendFromFeed(&changes, feed, 3)
assert.True(t, err == nil)
assert.Equals(t, len(changes), 3)
assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6"}))
db.Bucket.Incr("_sync:seq", 7, 0, 0)
// Modify user to have access to both channels (sequence 2):
userInfo, err := db.GetPrincipal("naomi", true)
assert.True(t, userInfo != nil)
userInfo.ExplicitChannels = base.SetOf("ABC", "PBS")
_, err = db.UpdatePrincipal(*userInfo, true, true)
assertNoError(t, err, "UpdatePrincipal failed")
WriteDirect(db, []string{"PBS"}, 9)
db.changeCache.waitForSequence(9)
time.Sleep(50 * time.Millisecond)
err = appendFromFeed(&changes, feed, 4)
assert.True(t, err == nil)
assert.Equals(t, len(changes), 7)
assert.True(t, verifyChangesFullSequences(changes, []string{"1", "2", "2::6", "2:8:5", "2:8:6", "2::8", "2::9"}))
// Notes:
// 1. 2::8 is the user sequence
// 2. The duplicate send of sequence '6' is the standard behaviour when a channel is added - we don't know
// whether the user has already seen the documents on the channel previously, so it gets resent
close(options.Terminator)
}
Example 10: TestLowSequenceHandling
// Test low sequence handling of late arriving sequences to a continuous changes feed
func TestLowSequenceHandling(t *testing.T) {
var logKeys = map[string]bool{
"Cache": true,
"Changes": true,
"Changes+": true,
}
base.UpdateLogKeys(logKeys, true)
db := setupTestDBWithCacheOptions(t, shortWaitCache())
defer tearDownTestDB(t, db)
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Create a user with access to channel ABC
authenticator := db.Authenticator()
user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "TBS"))
authenticator.Save(user)
// Simulate seq 3 and 4 being delayed - write 1,2,5,6
WriteDirect(db, []string{"ABC", "NBC"}, 1)
WriteDirect(db, []string{"ABC"}, 2)
WriteDirect(db, []string{"ABC", "PBS"}, 5)
WriteDirect(db, []string{"ABC", "PBS"}, 6)
db.changeCache.waitForSequenceID(SequenceID{Seq: 6})
db.user, _ = authenticator.GetUser("naomi")
// Start changes feed
var options ChangesOptions
options.Since = SequenceID{Seq: 0}
options.Terminator = make(chan bool)
defer close(options.Terminator)
options.Continuous = true
options.Wait = true
feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
assert.True(t, err == nil)
// Array to read changes from feed to support assertions
var changes = make([]*ChangeEntry, 0, 50)
time.Sleep(50 * time.Millisecond)
err = appendFromFeed(&changes, feed, 4)
// Validate the initial sequences arrive as expected
assert.True(t, err == nil)
assert.Equals(t, len(changes), 4)
assert.DeepEquals(t, changes[0], &ChangeEntry{
Seq: SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
ID: "doc-1",
Changes: []ChangeRev{{"rev": "1-a"}}})
// Test backfill clear - sequence numbers go back to standard handling
WriteDirect(db, []string{"ABC", "NBC", "PBS", "TBS"}, 3)
WriteDirect(db, []string{"ABC", "PBS"}, 4)
db.changeCache.waitForSequenceWithMissing(4)
time.Sleep(50 * time.Millisecond)
err = appendFromFeed(&changes, feed, 2)
assert.True(t, err == nil)
assert.Equals(t, len(changes), 6)
assert.True(t, verifyChangesSequencesIgnoreOrder(changes, []uint64{1, 2, 5, 6, 3, 4}))
WriteDirect(db, []string{"ABC"}, 7)
WriteDirect(db, []string{"ABC", "NBC"}, 8)
WriteDirect(db, []string{"ABC", "PBS"}, 9)
db.changeCache.waitForSequence(9)
appendFromFeed(&changes, feed, 5)
assert.True(t, verifyChangesSequencesIgnoreOrder(changes, []uint64{1, 2, 5, 6, 3, 4, 7, 8, 9}))
}
Example 11: TestContinuousChangesBackfill
// Test backfill of late arriving sequences to a continuous changes feed
func TestContinuousChangesBackfill(t *testing.T) {
var logKeys = map[string]bool{
"Sequences": true,
"Cache": true,
"Changes+": true,
}
base.UpdateLogKeys(logKeys, true)
db := setupTestDBWithCacheOptions(t, shortWaitCache())
defer tearDownTestDB(t, db)
db.ChannelMapper = channels.NewDefaultChannelMapper()
// Create a user with access to channel ABC
authenticator := db.Authenticator()
user, _ := authenticator.NewUser("naomi", "letmein", channels.SetOf("ABC", "PBS", "NBC", "CBS"))
authenticator.Save(user)
// Simulate seq 3 and 4 being delayed - write 1,2,5,6
WriteDirect(db, []string{"ABC", "NBC"}, 1)
WriteDirect(db, []string{"ABC"}, 2)
WriteDirect(db, []string{"PBS"}, 5)
WriteDirect(db, []string{"CBS"}, 6)
db.changeCache.waitForSequenceID(SequenceID{Seq: 6})
db.user, _ = authenticator.GetUser("naomi")
// Start changes feed
var options ChangesOptions
options.Since = SequenceID{Seq: 0}
options.Terminator = make(chan bool)
options.Continuous = true
options.Wait = true
feed, err := db.MultiChangesFeed(base.SetOf("*"), options)
assert.True(t, err == nil)
// Array to read changes from feed to support assertions
var changes = make([]*ChangeEntry, 0, 50)
time.Sleep(50 * time.Millisecond)
// Validate the initial sequences arrive as expected
err = appendFromFeed(&changes, feed, 4)
assert.True(t, err == nil)
assert.Equals(t, len(changes), 4)
assert.DeepEquals(t, changes[0], &ChangeEntry{
Seq: SequenceID{Seq: 1, TriggeredBy: 0, LowSeq: 2},
ID: "doc-1",
Changes: []ChangeRev{{"rev": "1-a"}}})
WriteDirect(db, []string{"CBS"}, 3)
WriteDirect(db, []string{"PBS"}, 12)
db.changeCache.waitForSequenceID(SequenceID{Seq: 12})
time.Sleep(50 * time.Millisecond)
err = appendFromFeed(&changes, feed, 2)
assert.Equals(t, len(changes), 6)
assert.True(t, verifyChangesFullSequences(changes, []string{
"1", "2", "2::5", "2::6", "3", "3::12"}))
// Test multiple backfill in single changes loop iteration
WriteDirect(db, []string{"ABC", "NBC", "PBS", "CBS"}, 4)
WriteDirect(db, []string{"ABC", "NBC", "PBS", "CBS"}, 7)
WriteDirect(db, []string{"ABC", "PBS"}, 8)
WriteDirect(db, []string{"ABC", "PBS"}, 13)
db.changeCache.waitForSequenceID(SequenceID{Seq: 13})
time.Sleep(50 * time.Millisecond)
err = appendFromFeed(&changes, feed, 4)
// We can't guarantee how compound sequences will be generated in a multi-core test - will
// depend on timing of arrival in late sequence logs. e.g. could come through as any one of
// the following (where all are valid), depending on timing:
// ..."4","7","8","8::13"
// ..."4", "6::7", "6::8", "6::13"
// ..."3::4", "3::7", "3::8", "3::13"
// For this reason, we're just verifying the number of sequences is correct
assert.Equals(t, len(changes), 10)
close(options.Terminator)
}
Example 12: TestOneShotChangesWithExplicitDocIds
func TestOneShotChangesWithExplicitDocIds(t *testing.T) {
var logKeys = map[string]bool{
"TEST": true,
}
base.UpdateLogKeys(logKeys, true)
rt := restTester{syncFn: `function(doc) {channel(doc.channels)}`}
// Create user1
response := rt.sendAdminRequest("PUT", "/db/_user/user1", `{"email":"[email protected]", "password":"letmein", "admin_channels":["alpha"]}`)
assertStatus(t, response, 201)
// Create user2
response = rt.sendAdminRequest("PUT", "/db/_user/user2", `{"email":"[email protected]", "password":"letmein", "admin_channels":["beta"]}`)
assertStatus(t, response, 201)
// Create user3
response = rt.sendAdminRequest("PUT", "/db/_user/user3", `{"email":"[email protected]", "password":"letmein", "admin_channels":["alpha","beta"]}`)
assertStatus(t, response, 201)
// Create user4
response = rt.sendAdminRequest("PUT", "/db/_user/user4", `{"email":"[email protected]", "password":"letmein", "admin_channels":[]}`)
assertStatus(t, response, 201)
// Create user5
response = rt.sendAdminRequest("PUT", "/db/_user/user5", `{"email":"[email protected]", "password":"letmein", "admin_channels":["*"]}`)
assertStatus(t, response, 201)
//Create docs
assertStatus(t, rt.sendRequest("PUT", "/db/doc1", `{"channels":["alpha"]}`), 201)
assertStatus(t, rt.sendRequest("PUT", "/db/doc2", `{"channels":["alpha"]}`), 201)
assertStatus(t, rt.sendRequest("PUT", "/db/doc3", `{"channels":["alpha"]}`), 201)
assertStatus(t, rt.sendRequest("PUT", "/db/doc4", `{"channels":["alpha"]}`), 201)
assertStatus(t, rt.sendRequest("PUT", "/db/docA", `{"channels":["beta"]}`), 201)
assertStatus(t, rt.sendRequest("PUT", "/db/docB", `{"channels":["beta"]}`), 201)
assertStatus(t, rt.sendRequest("PUT", "/db/docC", `{"channels":["beta"]}`), 201)
assertStatus(t, rt.sendRequest("PUT", "/db/docD", `{"channels":["beta"]}`), 201)
// Create struct to hold changes response
var changes struct {
Results []db.ChangeEntry
}
//User has access to single channel
body := `{"filter":"_doc_ids", "doc_ids":["doc4", "doc1", "docA", "b0gus"]}`
request, _ := http.NewRequest("POST", "/db/_changes", bytes.NewBufferString(body))
request.SetBasicAuth("user1", "letmein")
response = rt.send(request)
assertStatus(t, response, 200)
err := json.Unmarshal(response.Body.Bytes(), &changes)
assert.Equals(t, err, nil)
assert.Equals(t, len(changes.Results), 2)
assert.Equals(t, changes.Results[1].ID, "doc4")
//User has access to different single channel
body = `{"filter":"_doc_ids", "doc_ids":["docC", "b0gus", "docB", "docD", "doc1"]}`
request, _ = http.NewRequest("POST", "/db/_changes", bytes.NewBufferString(body))
request.SetBasicAuth("user2", "letmein")
response = rt.send(request)
assertStatus(t, response, 200)
err = json.Unmarshal(response.Body.Bytes(), &changes)
assert.Equals(t, err, nil)
assert.Equals(t, len(changes.Results), 3)
assert.Equals(t, changes.Results[2].ID, "docD")
//User has access to multiple channels
body = `{"filter":"_doc_ids", "doc_ids":["docC", "b0gus", "doc4", "docD", "doc1"]}`
request, _ = http.NewRequest("POST", "/db/_changes", bytes.NewBufferString(body))
request.SetBasicAuth("user3", "letmein")
response = rt.send(request)
assertStatus(t, response, 200)
err = json.Unmarshal(response.Body.Bytes(), &changes)
assert.Equals(t, err, nil)
assert.Equals(t, len(changes.Results), 4)
assert.Equals(t, changes.Results[3].ID, "docD")
//User has no channel access
body = `{"filter":"_doc_ids", "doc_ids":["docC", "b0gus", "doc4", "docD", "doc1"]}`
request, _ = http.NewRequest("POST", "/db/_changes", bytes.NewBufferString(body))
request.SetBasicAuth("user4", "letmein")
response = rt.send(request)
assertStatus(t, response, 200)
err = json.Unmarshal(response.Body.Bytes(), &changes)
assert.Equals(t, err, nil)
assert.Equals(t, len(changes.Results), 0)
//User has "*" channel access
body = `{"filter":"_doc_ids", "doc_ids":["docC", "b0gus", "doc4", "docD", "doc1", "docA"]}`
request, _ = http.NewRequest("POST", "/db/_changes", bytes.NewBufferString(body))
request.SetBasicAuth("user5", "letmein")
response = rt.send(request)
assertStatus(t, response, 200)
err = json.Unmarshal(response.Body.Bytes(), &changes)
assert.Equals(t, err, nil)
assert.Equals(t, len(changes.Results), 5)
//User has "*" channel access, override POST with GET params
body = `{"filter":"_doc_ids", "doc_ids":["docC", "b0gus", "doc4", "docD", "doc1", "docA"]}`
request, _ = http.NewRequest("POST", `/db/_changes?doc_ids=["docC","doc1"]`, bytes.NewBufferString(body))
request.SetBasicAuth("user5", "letmein")
//......... (remainder of this code example omitted) .........
Example 13: TestWebhookTimeout
func TestWebhookTimeout(t *testing.T) {
if !testLiveHTTP {
return
}
var logKeys = map[string]bool{
"Events+": true,
}
base.UpdateLogKeys(logKeys, true)
count, sum, _ := InitWebhookTest()
ids := make([]string, 200)
for i := 0; i < 200; i++ {
ids[i] = fmt.Sprintf("%d", i)
}
time.Sleep(1 * time.Second)
eventForTest := func(i int) (Body, base.Set) {
testBody := Body{
"_id": ids[i],
"value": i,
}
var channelSet base.Set
if i%2 == 0 {
channelSet = base.SetFromArray([]string{"Even"})
} else {
channelSet = base.SetFromArray([]string{"Odd"})
}
return testBody, channelSet
}
// Test fast execution, short timeout. All events processed
log.Println("Test fast webhook, short timeout")
em := NewEventManager()
em.Start(0, -1)
timeout := uint64(2)
webhookHandler, _ := NewWebhook("http://localhost:8081/echo", "", &timeout)
em.RegisterEventHandler(webhookHandler, DocumentChange)
for i := 0; i < 10; i++ {
body, channels := eventForTest(i)
em.RaiseDocumentChangeEvent(body, "", channels)
}
time.Sleep(50 * time.Millisecond)
assert.Equals(t, *count, 10)
// Test slow webhook, short timeout, numProcess=1, waitForProcess > timeout. All events should get processed.
log.Println("Test slow webhook, short timeout")
*count, *sum = 0, 0.0
errCount := 0
em = NewEventManager()
em.Start(1, 1100)
timeout = uint64(1)
webhookHandler, _ = NewWebhook("http://localhost:8081/slow_2s", "", &timeout)
em.RegisterEventHandler(webhookHandler, DocumentChange)
for i := 0; i < 10; i++ {
body, channels := eventForTest(i)
err := em.RaiseDocumentChangeEvent(body, "", channels)
time.Sleep(2 * time.Millisecond)
if err != nil {
errCount++
}
}
time.Sleep(15 * time.Second)
// Even though we timed out waiting for response on the SG side, POST still completed on target side.
assert.Equals(t, *count, 10)
// Test slow webhook, short timeout, numProcess=1, waitForProcess << timeout. Events that don't fit in queues
// should get dropped (1 immediately processed, 1 in normal queue, 3 in overflow queue, 5 dropped)
log.Println("Test very slow webhook, short timeout")
*count, *sum = 0, 0.0
errCount = 0
em = NewEventManager()
em.Start(1, 100)
timeout = uint64(9)
webhookHandler, _ = NewWebhook("http://localhost:8081/slow_5s", "", &timeout)
em.RegisterEventHandler(webhookHandler, DocumentChange)
for i := 0; i < 10; i++ {
body, channels := eventForTest(i)
err := em.RaiseDocumentChangeEvent(body, "", channels)
time.Sleep(2 * time.Millisecond)
if err != nil {
errCount++
}
}
// wait for slow webhook to finish processing
time.Sleep(25 * time.Second)
assert.Equals(t, *count, 5)
// Test slow webhook, no timeout, numProcess=1, waitForProcess=1s. All events should complete.
log.Println("Test slow webhook, no timeout, wait for process ")
*count, *sum = 0, 0.0
errCount = 0
em = NewEventManager()
em.Start(1, 1100)
timeout = uint64(0)
webhookHandler, _ = NewWebhook("http://localhost:8081/slow", "", &timeout)
em.RegisterEventHandler(webhookHandler, DocumentChange)
for i := 0; i < 10; i++ {
//......... (remainder of this code example omitted) .........