This page collects typical usage examples of the Golang method github.com/acgshare/bleve/index/store.KVWriter.Close. If you are unsure what KVWriter.Close does, how to call it, or where it is normally used, the hand-picked method examples below may help. You can also look further into the containing type store.KVWriter in the package github.com/acgshare/bleve/index/store.
The following shows 8 code examples of KVWriter.Close, sorted by popularity by default.
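
Most of the examples below release the writer the same way: obtain a store.KVWriter from the underlying store.KVStore, defer a function that calls KVWriter.Close, and only promote the Close error when no earlier error has been recorded. As a quick orientation, here is a minimal sketch of that idiom. It is not taken from the examples themselves: the helper deleteKey and its package name are made up for illustration, while the Writer, NewBatch, Delete, ExecuteBatch, and Close calls are the same ones that appear in the examples below.

package example

import (
    "github.com/acgshare/bleve/index/store"
)

// deleteKey is a hypothetical helper showing the defer-Close idiom used
// throughout the examples: the Close error is surfaced only when no
// earlier error has already been set on the named return value.
func deleteKey(s store.KVStore, key []byte) (err error) {
    var writer store.KVWriter
    writer, err = s.Writer()
    if err != nil {
        return
    }
    defer func() {
        if cerr := writer.Close(); err == nil && cerr != nil {
            err = cerr
        }
    }()

    // stage the mutation in a batch and apply it through the writer
    wb := writer.NewBatch()
    wb.Delete(key)
    return writer.ExecuteBatch(wb)
}

Example 8 is the one exception to the deferred pattern: it calls kvwriter.Close() explicitly after the batch has been applied and checks the returned error before updating its stats counters.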
Example 1: Update
func (f *Firestorm) Update(doc *document.Document) (err error) {
    // assign this document a number
    doc.Number = atomic.AddUint64(&f.highDocNumber, 1)

    // do analysis before acquiring write lock
    analysisStart := time.Now()
    resultChan := make(chan *index.AnalysisResult)
    aw := index.NewAnalysisWork(f, doc, resultChan)

    // put the work on the queue
    f.analysisQueue.Queue(aw)

    // wait for the result
    result := <-resultChan
    close(resultChan)
    atomic.AddUint64(&f.stats.analysisTime, uint64(time.Since(analysisStart)))

    // start a writer for this update
    indexStart := time.Now()
    var kvwriter store.KVWriter
    kvwriter, err = f.store.Writer()
    if err != nil {
        return
    }
    defer func() {
        if cerr := kvwriter.Close(); err == nil && cerr != nil {
            err = cerr
        }
    }()

    var dictionaryDeltas map[string]int64
    dictionaryDeltas, err = f.batchRows(kvwriter, [][]index.IndexRow{result.Rows}, nil)
    if err != nil {
        _ = kvwriter.Close()
        atomic.AddUint64(&f.stats.errors, 1)
        return
    }

    f.compensator.Mutate([]byte(doc.ID), doc.Number)
    f.lookuper.NotifyBatch([]*InFlightItem{&InFlightItem{[]byte(doc.ID), doc.Number}})
    f.dictUpdater.NotifyBatch(dictionaryDeltas)

    atomic.AddUint64(&f.stats.indexTime, uint64(time.Since(indexStart)))
    return
}
Example 2: DeleteInternal
func (f *Firestorm) DeleteInternal(key []byte) (err error) {
    internalRow := NewInternalRow(key, nil)

    var writer store.KVWriter
    writer, err = f.store.Writer()
    if err != nil {
        return
    }
    defer func() {
        if cerr := writer.Close(); err == nil && cerr != nil {
            err = cerr
        }
    }()

    wb := writer.NewBatch()
    wb.Delete(internalRow.Key())
    return writer.ExecuteBatch(wb)
}
Example 3: DeleteInternal
func (udc *UpsideDownCouch) DeleteInternal(key []byte) (err error) {
    internalRow := NewInternalRow(key, nil)

    udc.writeMutex.Lock()
    defer udc.writeMutex.Unlock()

    var writer store.KVWriter
    writer, err = udc.store.Writer()
    if err != nil {
        return
    }
    defer func() {
        if cerr := writer.Close(); err == nil && cerr != nil {
            err = cerr
        }
    }()

    batch := writer.NewBatch()
    batch.Delete(internalRow.Key())
    return writer.ExecuteBatch(batch)
}
Example 4: Batch
func (udc *UpsideDownCouch) Batch(batch *index.Batch) (err error) {
    analysisStart := time.Now()
    resultChan := make(chan *index.AnalysisResult, len(batch.IndexOps))

    var numUpdates uint64
    for _, doc := range batch.IndexOps {
        if doc != nil {
            numUpdates++
        }
    }

    var detectedUnsafeMutex sync.RWMutex
    detectedUnsafe := false

    go func() {
        sofar := uint64(0)
        for _, doc := range batch.IndexOps {
            if doc != nil {
                sofar++
                if sofar > numUpdates {
                    detectedUnsafeMutex.Lock()
                    detectedUnsafe = true
                    detectedUnsafeMutex.Unlock()
                    return
                }
                aw := index.NewAnalysisWork(udc, doc, resultChan)
                // put the work on the queue
                udc.analysisQueue.Queue(aw)
            }
        }
    }()

    // retrieve back index rows concurrent with analysis
    docBackIndexRowErr := error(nil)
    docBackIndexRowCh := make(chan *docBackIndexRow, len(batch.IndexOps))

    udc.writeMutex.Lock()
    defer udc.writeMutex.Unlock()

    go func() {
        defer close(docBackIndexRowCh)

        // open a reader for backindex lookup
        var kvreader store.KVReader
        kvreader, err = udc.store.Reader()
        if err != nil {
            docBackIndexRowErr = err
            return
        }

        for docID, doc := range batch.IndexOps {
            backIndexRow, err := udc.backIndexRowForDoc(kvreader, docID)
            if err != nil {
                docBackIndexRowErr = err
                return
            }

            docBackIndexRowCh <- &docBackIndexRow{docID, doc, backIndexRow}
        }

        err = kvreader.Close()
        if err != nil {
            docBackIndexRowErr = err
            return
        }
    }()

    // wait for analysis result
    newRowsMap := make(map[string][]index.IndexRow)
    var itemsDeQueued uint64
    for itemsDeQueued < numUpdates {
        result := <-resultChan
        newRowsMap[result.DocID] = result.Rows
        itemsDeQueued++
    }
    close(resultChan)

    atomic.AddUint64(&udc.stats.analysisTime, uint64(time.Since(analysisStart)))

    docsAdded := uint64(0)
    docsDeleted := uint64(0)

    indexStart := time.Now()

    // prepare a list of rows
    var addRowsAll [][]UpsideDownCouchRow
    var updateRowsAll [][]UpsideDownCouchRow
    var deleteRowsAll [][]UpsideDownCouchRow

    // add the internal ops
    var updateRows []UpsideDownCouchRow
    var deleteRows []UpsideDownCouchRow
    for internalKey, internalValue := range batch.InternalOps {
        if internalValue == nil {
            // delete
            deleteInternalRow := NewInternalRow([]byte(internalKey), nil)
            deleteRows = append(deleteRows, deleteInternalRow)
        } else {
//......... part of this example is omitted here .........
Example 5: Delete
func (udc *UpsideDownCouch) Delete(id string) (err error) {
    indexStart := time.Now()

    udc.writeMutex.Lock()
    defer udc.writeMutex.Unlock()

    // open a reader for backindex lookup
    var kvreader store.KVReader
    kvreader, err = udc.store.Reader()
    if err != nil {
        return
    }

    // first we lookup the backindex row for the doc id if it exists
    // lookup the back index row
    var backIndexRow *BackIndexRow
    backIndexRow, err = udc.backIndexRowForDoc(kvreader, id)
    if err != nil {
        _ = kvreader.Close()
        atomic.AddUint64(&udc.stats.errors, 1)
        return
    }

    err = kvreader.Close()
    if err != nil {
        return
    }

    if backIndexRow == nil {
        atomic.AddUint64(&udc.stats.deletes, 1)
        return
    }

    // start a writer for this delete
    var kvwriter store.KVWriter
    kvwriter, err = udc.store.Writer()
    if err != nil {
        return
    }
    defer func() {
        if cerr := kvwriter.Close(); err == nil && cerr != nil {
            err = cerr
        }
    }()

    var deleteRowsAll [][]UpsideDownCouchRow

    deleteRows := udc.deleteSingle(id, backIndexRow, nil)
    if len(deleteRows) > 0 {
        deleteRowsAll = append(deleteRowsAll, deleteRows)
    }

    err = udc.batchRows(kvwriter, nil, nil, deleteRowsAll)
    if err == nil {
        udc.m.Lock()
        udc.docCount--
        udc.m.Unlock()
    }
    atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart)))
    if err == nil {
        atomic.AddUint64(&udc.stats.deletes, 1)
    } else {
        atomic.AddUint64(&udc.stats.errors, 1)
    }
    return
}
Example 6: Update
func (udc *UpsideDownCouch) Update(doc *document.Document) (err error) {
    // do analysis before acquiring write lock
    analysisStart := time.Now()
    resultChan := make(chan *index.AnalysisResult)
    aw := index.NewAnalysisWork(udc, doc, resultChan)

    // put the work on the queue
    udc.analysisQueue.Queue(aw)

    // wait for the result
    result := <-resultChan
    close(resultChan)
    atomic.AddUint64(&udc.stats.analysisTime, uint64(time.Since(analysisStart)))

    udc.writeMutex.Lock()
    defer udc.writeMutex.Unlock()

    // open a reader for backindex lookup
    var kvreader store.KVReader
    kvreader, err = udc.store.Reader()
    if err != nil {
        return
    }

    // first we lookup the backindex row for the doc id if it exists
    // lookup the back index row
    var backIndexRow *BackIndexRow
    backIndexRow, err = udc.backIndexRowForDoc(kvreader, doc.ID)
    if err != nil {
        _ = kvreader.Close()
        atomic.AddUint64(&udc.stats.errors, 1)
        return
    }

    err = kvreader.Close()
    if err != nil {
        return
    }

    // start a writer for this update
    indexStart := time.Now()
    var kvwriter store.KVWriter
    kvwriter, err = udc.store.Writer()
    if err != nil {
        return
    }
    defer func() {
        if cerr := kvwriter.Close(); err == nil && cerr != nil {
            err = cerr
        }
    }()

    // prepare a list of rows
    var addRowsAll [][]UpsideDownCouchRow
    var updateRowsAll [][]UpsideDownCouchRow
    var deleteRowsAll [][]UpsideDownCouchRow

    addRows, updateRows, deleteRows := udc.mergeOldAndNew(backIndexRow, result.Rows)
    if len(addRows) > 0 {
        addRowsAll = append(addRowsAll, addRows)
    }
    if len(updateRows) > 0 {
        updateRowsAll = append(updateRowsAll, updateRows)
    }
    if len(deleteRows) > 0 {
        deleteRowsAll = append(deleteRowsAll, deleteRows)
    }

    err = udc.batchRows(kvwriter, addRowsAll, updateRowsAll, deleteRowsAll)
    if err == nil && backIndexRow == nil {
        udc.m.Lock()
        udc.docCount++
        udc.m.Unlock()
    }
    atomic.AddUint64(&udc.stats.indexTime, uint64(time.Since(indexStart)))
    if err == nil {
        atomic.AddUint64(&udc.stats.updates, 1)
    } else {
        atomic.AddUint64(&udc.stats.errors, 1)
    }
    return
}
Example 7: Open
func (udc *UpsideDownCouch) Open() (err error) {
    // acquire the write mutex for the duration of Open()
    udc.writeMutex.Lock()
    defer udc.writeMutex.Unlock()

    // open the kv store
    storeConstructor := registry.KVStoreConstructorByName(udc.storeName)
    if storeConstructor == nil {
        err = index.ErrorUnknownStorageType
        return
    }

    // now open the store
    udc.store, err = storeConstructor(&mergeOperator, udc.storeConfig)
    if err != nil {
        return
    }

    // start a reader to look at the index
    var kvreader store.KVReader
    kvreader, err = udc.store.Reader()
    if err != nil {
        return
    }

    var value []byte
    value, err = kvreader.Get(VersionKey)
    if err != nil {
        _ = kvreader.Close()
        return
    }

    if value != nil {
        err = udc.loadSchema(kvreader)
        if err != nil {
            _ = kvreader.Close()
            return
        }

        // set doc count
        udc.m.Lock()
        udc.docCount, err = udc.countDocs(kvreader)
        udc.m.Unlock()

        err = kvreader.Close()
    } else {
        // new index, close the reader and open writer to init
        err = kvreader.Close()
        if err != nil {
            return
        }

        var kvwriter store.KVWriter
        kvwriter, err = udc.store.Writer()
        if err != nil {
            return
        }
        defer func() {
            if cerr := kvwriter.Close(); err == nil && cerr != nil {
                err = cerr
            }
        }()

        // init the index
        err = udc.init(kvwriter)
    }

    return
}
Example 8: Batch
//......... part of this example is omitted here .........
        for _, doc := range batch.IndexOps {
            if doc != nil {
                sofar++
                if sofar > docsUpdated {
                    detectedUnsafeMutex.Lock()
                    detectedUnsafe = true
                    detectedUnsafeMutex.Unlock()
                    return
                }
                aw := index.NewAnalysisWork(f, doc, resultChan)
                // put the work on the queue
                f.analysisQueue.Queue(aw)
            }
        }
    }()

    // extra 1 capacity for internal updates.
    collectRows := make([][]index.IndexRow, 0, docsUpdated+1)

    // wait for the result
    var itemsDeQueued uint64
    for itemsDeQueued < docsUpdated {
        result := <-resultChan
        collectRows = append(collectRows, result.Rows)
        itemsDeQueued++
    }
    close(resultChan)

    detectedUnsafeMutex.RLock()
    defer detectedUnsafeMutex.RUnlock()
    if detectedUnsafe {
        return UnsafeBatchUseDetected
    }

    atomic.AddUint64(&f.stats.analysisTime, uint64(time.Since(analysisStart)))

    var deleteKeys [][]byte
    if len(batch.InternalOps) > 0 {
        // add the internal ops
        updateInternalRows := make([]index.IndexRow, 0, len(batch.InternalOps))
        for internalKey, internalValue := range batch.InternalOps {
            if internalValue == nil {
                // delete
                deleteInternalRow := NewInternalRow([]byte(internalKey), nil)
                deleteKeys = append(deleteKeys, deleteInternalRow.Key())
            } else {
                updateInternalRow := NewInternalRow([]byte(internalKey), internalValue)
                updateInternalRows = append(updateInternalRows, updateInternalRow)
            }
        }
        collectRows = append(collectRows, updateInternalRows)
    }

    inflightItems := make([]*InFlightItem, 0, len(batch.IndexOps))
    for docID, doc := range batch.IndexOps {
        if doc != nil {
            inflightItems = append(inflightItems,
                &InFlightItem{[]byte(docID), doc.Number})
        } else {
            inflightItems = append(inflightItems,
                &InFlightItem{[]byte(docID), 0})
        }
    }

    indexStart := time.Now()

    // start a writer for this batch
    var kvwriter store.KVWriter
    kvwriter, err = f.store.Writer()
    if err != nil {
        return
    }

    var dictionaryDeltas map[string]int64
    dictionaryDeltas, err = f.batchRows(kvwriter, collectRows, deleteKeys)
    if err != nil {
        _ = kvwriter.Close()
        atomic.AddUint64(&f.stats.errors, 1)
        return
    }

    f.compensator.MutateBatch(inflightItems, lastDocNumber)

    err = kvwriter.Close()

    f.lookuper.NotifyBatch(inflightItems)
    f.dictUpdater.NotifyBatch(dictionaryDeltas)

    atomic.AddUint64(&f.stats.indexTime, uint64(time.Since(indexStart)))

    if err == nil {
        atomic.AddUint64(&f.stats.updates, docsUpdated)
        atomic.AddUint64(&f.stats.deletes, docsDeleted)
        atomic.AddUint64(&f.stats.batches, 1)
    } else {
        atomic.AddUint64(&f.stats.errors, 1)
    }
    return
}