本文整理汇总了Golang中github.com/balzaczyy/golucene/core/store.IndexInput.Close方法的典型用法代码示例。如果您正苦于以下问题:Golang IndexInput.Close方法的具体用法?Golang IndexInput.Close怎么用?Golang IndexInput.Close使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类github.com/balzaczyy/golucene/core/store.IndexInput
的用法示例。
在下文中一共展示了IndexInput.Close方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: _crash
// _crash simulates a machine crash while files may still be open: it
// flags the wrapper as crashed, force-closes every open file handle,
// then corrupts each file that was written but never synced by randomly
// picking one of five actions: delete it, zero it out in place,
// partially truncate it (via a temp-file round trip), leave it intact,
// or fully truncate it to zero bytes.
// NOTE(review): this snippet is truncated by the source page; the tail
// of the function (after the "fully truncated" case) is not shown here.
func (w *MockDirectoryWrapper) _crash() error {
	w.crashed = true
	w.openFiles = make(map[string]int)
	w.openFilesForWrite = make(map[string]bool)
	w.openFilesDeleted = make(map[string]bool)
	// Snapshot the set of un-synced files; only those are corrupted below.
	files := w.unSyncedFiles
	w.unSyncedFiles = make(map[string]bool)
	// first force-close all files, so we can corrupt on windows etc.
	// clone the file map, as these guys want to remove themselves on close.
	m := make(map[io.Closer]error)
	for k, v := range w.openFileHandles {
		m[k] = v
	}
	for f, _ := range m {
		f.Close() // ignore error
	}
	for name, _ := range files {
		var action string
		var err error
		switch w.randomState.Intn(5) {
		case 0:
			action = "deleted"
			err = w.deleteFile(name, true)
		case 1:
			action = "zeroes"
			// Zero out file entirely: overwrite all bytes with zeroes,
			// 256 at a time, without changing the file's length.
			var length int64
			length, err = w.FileLength(name)
			if err == nil {
				zeroes := make([]byte, 256)
				var upto int64 = 0
				var out store.IndexOutput
				out, err = w.BaseDirectoryWrapperImpl.CreateOutput(name, NewDefaultIOContext(w.randomState))
				if err == nil {
					for upto < length && err == nil {
						limit := length - upto
						if int64(len(zeroes)) < limit {
							limit = int64(len(zeroes))
						}
						err = out.WriteBytes(zeroes[:limit])
						upto += limit
					}
					if err == nil {
						err = out.Close()
					}
				}
			}
		case 2:
			action = "partially truncated"
			// Partially Truncate the file:
			// First, make temp file and copy only half this file over:
			// pick a random temp name that does not already exist.
			var tempFilename string
			for {
				tempFilename = fmt.Sprintf("%v", w.randomState.Int())
				if !w.BaseDirectoryWrapperImpl.FileExists(tempFilename) {
					break
				}
			}
			var tempOut store.IndexOutput
			if tempOut, err = w.BaseDirectoryWrapperImpl.CreateOutput(tempFilename, NewDefaultIOContext(w.randomState)); err == nil {
				var ii store.IndexInput
				if ii, err = w.BaseDirectoryWrapperImpl.OpenInput(name, NewDefaultIOContext(w.randomState)); err == nil {
					// Copy only the first half of the file into the temp file.
					if err = tempOut.CopyBytes(ii, ii.Length()/2); err == nil {
						if err = tempOut.Close(); err == nil {
							if err = ii.Close(); err == nil {
								// Delete original and copy bytes back:
								if err = w.deleteFile(name, true); err == nil {
									var out store.IndexOutput
									if out, err = w.BaseDirectoryWrapperImpl.CreateOutput(name, NewDefaultIOContext(w.randomState)); err == nil {
										if ii, err = w.BaseDirectoryWrapperImpl.OpenInput(tempFilename, NewDefaultIOContext(w.randomState)); err == nil {
											if err = out.CopyBytes(ii, ii.Length()); err == nil {
												if err = out.Close(); err == nil {
													if err = ii.Close(); err == nil {
														// Clean up the temp file once the copy-back succeeded.
														err = w.deleteFile(tempFilename, true)
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		case 3:
			// the file survived intact:
			action = "didn't change"
		default:
			action = "fully truncated"
			// totally truncate the file to zero bytes
			if err = w.deleteFile(name, true); err == nil {
				var out store.IndexOutput
				if out, err = w.BaseDirectoryWrapperImpl.CreateOutput(name, NewDefaultIOContext(w.randomState)); err == nil {
					if err = out.SetLength(0); err == nil {
						err = out.Close()
					}
				}
			}
			//......... remainder omitted by the source page .........
示例2: NewBlockTreeTermsReader
// NewBlockTreeTermsReader opens the terms dictionary file for the given
// segment and, unless indexDivisor == -1, the terms index file as well.
// It verifies both file headers (and, for new-enough versions, the index
// checksum and the terms file's checksum footer), lets the postings
// reader initialize itself, then reads the per-field details.
// A deferred cleanup closes everything opened so far if any step fails
// (success stays false on every error return).
// NOTE(review): this snippet is truncated by the source page; the tail
// of the per-field loop and the return are not shown here.
func NewBlockTreeTermsReader(dir store.Directory,
	fieldInfos FieldInfos, info *SegmentInfo,
	postingsReader PostingsReaderBase, ctx store.IOContext,
	segmentSuffix string, indexDivisor int) (p FieldsProducer, err error) {
	// log.Print("Initializing BlockTreeTermsReader...")
	fp := &BlockTreeTermsReader{
		postingsReader: postingsReader,
		fields:         make(map[string]FieldReader),
		segment:        info.Name,
	}
	fp.in, err = dir.OpenInput(util.SegmentFileName(info.Name, segmentSuffix, TERMS_EXTENSION), ctx)
	if err != nil {
		return nil, err
	}
	success := false
	var indexIn store.IndexInput
	defer func() {
		// On any failure, close whatever was opened so far.
		if !success {
			fmt.Println("Failed to initialize BlockTreeTermsReader.")
			if err != nil {
				fmt.Println("DEBUG ", err)
			}
			// this.close() will close in:
			util.CloseWhileSuppressingError(indexIn, fp)
		}
	}()
	fp.version, err = fp.readHeader(fp.in)
	if err != nil {
		return nil, err
	}
	// log.Printf("Version: %v", fp.version)
	if indexDivisor != -1 {
		// Also open the terms index and check it matches the terms file version.
		filename := util.SegmentFileName(info.Name, segmentSuffix, TERMS_INDEX_EXTENSION)
		indexIn, err = dir.OpenInput(filename, ctx)
		if err != nil {
			return nil, err
		}
		indexVersion, err := fp.readIndexHeader(indexIn)
		if err != nil {
			return nil, err
		}
		// log.Printf("Index version: %v", indexVersion)
		if int(indexVersion) != fp.version {
			return nil, errors.New(fmt.Sprintf("mixmatched version files: %v=%v,%v=%v", fp.in, fp.version, indexIn, indexVersion))
		}
	}
	// verify
	if indexIn != nil && fp.version >= TERMS_VERSION_CURRENT {
		if _, err = store.ChecksumEntireFile(indexIn); err != nil {
			return nil, err
		}
	}
	// Have PostingsReader init itself
	postingsReader.Init(fp.in)
	if fp.version >= TERMS_VERSION_CHECKSUM {
		// NOTE: data file is too costly to verify checksum against all the
		// bytes on open, but for now we at least verify proper structure
		// of the checksum footer: which looks for FOOTER_MAGIC +
		// algorithmID. This is cheap and can detect some forms of
		// corruption such as file trucation.
		if _, err = codec.RetrieveChecksum(fp.in); err != nil {
			return nil, err
		}
	}
	// Read per-field details
	fp.seekDir(fp.in, fp.dirOffset)
	if indexDivisor != -1 {
		fp.seekDir(indexIn, fp.indexDirOffset)
	}
	numFields, err := fp.in.ReadVInt()
	if err != nil {
		return nil, err
	}
	// log.Printf("Fields number: %v", numFields)
	if numFields < 0 {
		return nil, errors.New(fmt.Sprintf("invalid numFields: %v (resource=%v)", numFields, fp.in))
	}
	for i := int32(0); i < numFields; i++ {
		// log.Printf("Next field...")
		field, err := fp.in.ReadVInt()
		if err != nil {
			return nil, err
		}
		// log.Printf("Field: %v", field)
		numTerms, err := fp.in.ReadVLong()
		if err != nil {
			return nil, err
		}
		//......... remainder omitted by the source page .........
示例3: newCompressingStoredFieldsReader
// Sole constructor
// newCompressingStoredFieldsReader is the sole constructor. It opens the
// stored-fields index file, loads the index into memory, then opens the
// stored-fields data file and reads its metadata (codec header and
// packed-ints version). If initialization fails part-way, the deferred
// cleanup closes the reader and the index stream so no file handles leak.
func newCompressingStoredFieldsReader(d store.Directory,
	si *model.SegmentInfo, segmentSuffix string,
	fn model.FieldInfos, ctx store.IOContext, formatName string,
	compressionMode compressing.CompressionMode) (r *CompressingStoredFieldsReader, err error) {
	r = &CompressingStoredFieldsReader{}
	r.compressionMode = compressionMode
	segment := si.Name
	r.fieldInfos = fn
	r.numDocs = si.DocCount()
	var indexStream store.IndexInput
	success := false
	defer func() {
		// On any error return, success is still false: close whatever
		// was opened so far.
		if !success {
			log.Println("Failed to initialize CompressionStoredFieldsReader.")
			if err != nil {
				log.Print(err)
			}
			util.Close(r, indexStream)
		}
	}()
	// Load the index into memory
	indexStreamFN := util.SegmentFileName(segment, segmentSuffix, lucene40.FIELDS_INDEX_EXTENSION)
	indexStream, err = d.OpenInput(indexStreamFN, ctx)
	if err != nil {
		return nil, err
	}
	codecNameIdx := formatName + CODEC_SFX_IDX
	// Bug fix: the header-validation error was previously discarded;
	// propagate it like every other error in this constructor.
	if _, err = codec.CheckHeader(indexStream, codecNameIdx, CODEC_SFX_VERSION_START, CODEC_SFX_VERSION_CURRENT); err != nil {
		return nil, err
	}
	if int64(codec.HeaderLength(codecNameIdx)) != indexStream.FilePointer() {
		panic("assert fail")
	}
	r.indexReader, err = newCompressingStoredFieldsIndexReader(indexStream, si)
	if err != nil {
		return nil, err
	}
	// The index stream is fully consumed; close it eagerly and nil the
	// local so the deferred cleanup cannot double-close it.
	if err = indexStream.Close(); err != nil {
		return nil, err
	}
	indexStream = nil
	// Open the data file and read metadata
	fieldsStreamFN := util.SegmentFileName(segment, segmentSuffix, lucene40.FIELDS_EXTENSION)
	r.fieldsStream, err = d.OpenInput(fieldsStreamFN, ctx)
	if err != nil {
		return nil, err
	}
	codecNameDat := formatName + CODEC_SFX_DAT
	// Bug fix: same dropped-error issue as the index header check above.
	if _, err = codec.CheckHeader(r.fieldsStream, codecNameDat, CODEC_SFX_VERSION_START, CODEC_SFX_VERSION_CURRENT); err != nil {
		return nil, err
	}
	if int64(codec.HeaderLength(codecNameDat)) != r.fieldsStream.FilePointer() {
		panic("assert fail")
	}
	n, err := r.fieldsStream.ReadVInt()
	if err != nil {
		return nil, err
	}
	r.packedIntsVersion = int(n)
	r.decompressor = compressionMode.NewDecompressor()
	r.bytes = make([]byte, 0)
	success = true
	return r, nil
}
示例4: newBlockTreeTermsReader
// newBlockTreeTermsReader opens the terms dictionary file for the given
// segment and, unless indexDivisor == -1, the terms index file as well.
// It verifies that both file headers carry matching versions, lets the
// postings reader initialize itself, then reads the per-field details
// (field number, term count, and the root block code) for each field.
// A deferred cleanup closes everything opened so far if any step fails
// (success stays false on every error return).
// NOTE(review): this snippet is truncated by the source page; the tail
// of the per-field loop and the return are not shown here.
func newBlockTreeTermsReader(dir store.Directory,
	fieldInfos model.FieldInfos, info *model.SegmentInfo,
	postingsReader PostingsReaderBase, ctx store.IOContext,
	segmentSuffix string, indexDivisor int) (p FieldsProducer, err error) {
	log.Print("Initializing BlockTreeTermsReader...")
	fp := &BlockTreeTermsReader{
		postingsReader: postingsReader,
		fields:         make(map[string]FieldReader),
		segment:        info.Name,
	}
	fp.in, err = dir.OpenInput(util.SegmentFileName(info.Name, segmentSuffix, BTT_EXTENSION), ctx)
	if err != nil {
		return fp, err
	}
	success := false
	var indexIn store.IndexInput
	defer func() {
		// On any failure, close whatever was opened so far.
		if !success {
			log.Print("Failed to initialize BlockTreeTermsReader.")
			if err != nil {
				log.Print("DEBUG ", err)
			}
			// this.close() will close in:
			util.CloseWhileSuppressingError(indexIn, fp)
		}
	}()
	fp.version, err = fp.readHeader(fp.in)
	if err != nil {
		return fp, err
	}
	log.Printf("Version: %v", fp.version)
	if indexDivisor != -1 {
		// Also open the terms index and check it matches the terms file version.
		indexIn, err = dir.OpenInput(util.SegmentFileName(info.Name, segmentSuffix, BTT_INDEX_EXTENSION), ctx)
		if err != nil {
			return fp, err
		}
		indexVersion, err := fp.readIndexHeader(indexIn)
		if err != nil {
			return fp, err
		}
		log.Printf("Index version: %v", indexVersion)
		if int(indexVersion) != fp.version {
			return fp, errors.New(fmt.Sprintf("mixmatched version files: %v=%v,%v=%v", fp.in, fp.version, indexIn, indexVersion))
		}
	}
	// Have PostingsReader init itself
	postingsReader.Init(fp.in)
	// Read per-field details
	fp.seekDir(fp.in, fp.dirOffset)
	if indexDivisor != -1 {
		fp.seekDir(indexIn, fp.indexDirOffset)
	}
	numFields, err := fp.in.ReadVInt()
	if err != nil {
		return fp, err
	}
	log.Printf("Fields number: %v", numFields)
	if numFields < 0 {
		return fp, errors.New(fmt.Sprintf("invalid numFields: %v (resource=%v)", numFields, fp.in))
	}
	for i := int32(0); i < numFields; i++ {
		log.Printf("Next field...")
		field, err := fp.in.ReadVInt()
		if err != nil {
			return fp, err
		}
		log.Printf("Field: %v", field)
		numTerms, err := fp.in.ReadVLong()
		if err != nil {
			return fp, err
		}
		// assert numTerms >= 0
		log.Printf("Terms number: %v", numTerms)
		numBytes, err := fp.in.ReadVInt()
		if err != nil {
			return fp, err
		}
		log.Printf("Bytes number: %v", numBytes)
		// Read the serialized root block code for this field's term tree.
		rootCode := make([]byte, numBytes)
		err = fp.in.ReadBytes(rootCode)
		if err != nil {
			return fp, err
		}
		fieldInfo := fieldInfos.FieldInfoByNumber(int(field))
		// assert fieldInfo != nil
		var sumTotalTermFreq int64
		// DOCS_ONLY fields store no total term frequency; -1 marks it absent.
		if fieldInfo.IndexOptions() == model.INDEX_OPT_DOCS_ONLY {
			sumTotalTermFreq = -1
			//......... remainder omitted by the source page .........