本文整理汇总了Golang中github.com/balzaczyy/golucene/core/store.Directory类的典型用法代码示例。如果您正苦于以下问题:Golang Directory类的具体用法?Golang Directory怎么用?Golang Directory使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Directory类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: ReadFieldInfos
// ReadFieldInfos reads the most recent FieldInfos of the given segment info.
func ReadFieldInfos(info *SegmentCommitInfo) (fis FieldInfos, err error) {
	var fiDir store.Directory
	var ownDir bool // true when we opened fiDir here and must close it
	if info.FieldInfosGen() == -1 && info.Info.IsCompoundFile() {
		// No fieldInfos gen and the segment uses a compound file: the
		// field infos live inside the CFS, so open it for reading.
		fiDir, err = store.NewCompoundFileDirectory(info.Info.Dir,
			util.SegmentFileName(info.Info.Name, "", store.COMPOUND_FILE_EXTENSION),
			store.IO_CONTEXT_READONCE, false)
		if err != nil {
			return
		}
		ownDir = true
	} else {
		// Gen'd FIS are read outside CFS, or the segment doesn't use a compound file.
		fiDir = info.Info.Dir
	}
	defer func() {
		if ownDir {
			err = mergeError(err, fiDir.Close())
		}
	}()
	var segmentSuffix string
	if gen := info.FieldInfosGen(); gen != -1 {
		// Gen'd field infos carry a base-36 generation suffix in the file name.
		segmentSuffix = strconv.FormatInt(gen, 36)
	}
	codec := info.Info.Codec().(Codec)
	return codec.FieldInfosFormat().FieldInfosReader()(fiDir, info.Info.Name, segmentSuffix, store.IO_CONTEXT_READONCE)
}
示例2: NewMockDirectoryWrapper
// NewMockDirectoryWrapper wraps the given delegate Directory with a
// randomized, failure-injecting directory used by tests.
func NewMockDirectoryWrapper(random *rand.Rand, delegate store.Directory) *MockDirectoryWrapper {
	w := &MockDirectoryWrapper{
		noDeleteOpenFile:                 true,
		preventDoubleWrite:               true,
		trackDiskUsage:                   false,
		wrapLockFactory:                  true,
		failOnCreateOutput:               true,
		failOnOpenInput:                  true,
		assertNoUnreferencedFilesOnClose: true,
		throttling:                       THROTTLING_SOMETIMES,
		inputCloneCount:                  0,
		openFilesForWrite:                make(map[string]bool),
		openLocks:                        make(map[string]bool),
		openLocksLock:                    &sync.Mutex{},
		openFileHandles:                  make(map[io.Closer]error),
	}
	w.BaseDirectoryWrapperImpl = NewBaseDirectoryWrapper(delegate)
	w.Locker = &sync.Mutex{}
	// Must make a private random since our methods are called from different
	// methods; else test failures may not be reproducible from the original
	// seed.
	w.randomState = rand.New(rand.NewSource(random.Int63()))
	w.throttledOutput = NewThrottledIndexOutput(
		MBitsToBytes(40+w.randomState.Intn(10)), 5+w.randomState.Int63n(5), nil)
	// Force wrapping of the LockFactory.
	w.myLockFactory = newMockLockFactoryWrapper(w, delegate.LockFactory())
	w.init()
	return w
}
示例3: Write
/*
Write saves this vector to file name in Directory d, in a format that
can be read by the constructor BitVector(Directory, String, IOContext).
*/
func (bv *BitVector) Write(d store.Directory, name string, ctx store.IOContext) (err error) {
	assert(reflect.TypeOf(d).Name() != "CompoundFileDirectory")
	output, err := d.CreateOutput(name, ctx)
	if err != nil {
		return err
	}
	defer func() {
		// Always close the output; keep any close error alongside err.
		err = mergeError(err, output.Close())
	}()
	// -2 is the marker for the current, versioned on-disk format.
	if err = output.WriteInt(-2); err != nil {
		return err
	}
	if err = codec.WriteHeader(output, CODEC, BV_VERSION_CURRENT); err != nil {
		return err
	}
	if bv.isSparse() {
		// A sparse bit-set is saved more efficiently as d-gaps.
		err = bv.writeClearedDgaps(output)
	} else {
		err = bv.writeBits(output)
	}
	if err != nil {
		return err
	}
	if err = codec.WriteFooter(output); err != nil {
		return err
	}
	bv.assertCount()
	return nil
}
示例4: writeLiveDocs
/*
writeLiveDocs commits live docs (writes new _X_N.del files) and field
updates to the directory; returns true if it wrote any file and false
if there were no new deletes or updates to write.

NOTE(review): currently unreachable — the method panics immediately
because the port is unfinished; the code below shows the intended logic.
*/
func (rld *ReadersAndUpdates) writeLiveDocs(dir store.Directory) (bool, error) {
	panic("not implemented yet")
	rld.Lock()
	defer rld.Unlock()
	log.Printf("rld.writeLiveDocs seg=%v pendingDelCount=%v", rld.info, rld._pendingDeleteCount)
	if rld._pendingDeleteCount != 0 {
		// We have new deletes; the live-docs bitset must cover every doc.
		assert(rld._liveDocs.Length() == rld.info.Info.DocCount())
		// Do this so we can delete any created files on error; this
		// saves all codecs from having to do it:
		trackingDir := store.NewTrackingDirectoryWrapper(dir)
		// We can write directly to the actual name (vs writing to a .tmp
		// and renaming it) because the file is not live until the
		// segments file is written:
		var success = false
		defer func() {
			if !success {
				// Advance only the nextWriteDelGen so that a 2nd attempt to
				// write will write to a new file
				rld.info.AdvanceNextWriteDelGen()
				// Delete any partially created file(s):
				trackingDir.EachCreatedFiles(func(filename string) {
					dir.DeleteFile(filename) // ignore error
				})
			}
		}()
		err := rld.info.Info.Codec().(Codec).LiveDocsFormat().WriteLiveDocs(rld._liveDocs.(util.MutableBits),
			trackingDir, rld.info, rld._pendingDeleteCount, store.IO_CONTEXT_DEFAULT)
		if err != nil {
			return false, err
		}
		success = true
		// If we hit an error in the line above (e.g. disk full) then
		// info's delGen remains pointing to the previous (successfully
		// written) del docs:
		rld.info.AdvanceDelGen()
		rld.info.SetDelCount(rld.info.DelCount() + rld._pendingDeleteCount)
		assert(rld.info.DelCount() <= rld.info.Info.DocCount())
		rld._pendingDeleteCount = 0
		return true, nil
	}
	return false, nil
}
示例5: Write
// Write persists si as a Lucene 4.0 .si file in dir and registers the
// file name on the SegmentInfo.
func (w *Lucene40SegmentInfoWriter) Write(dir store.Directory,
	si *SegmentInfo, fis FieldInfos, ctx store.IOContext) (err error) {
	fileName := util.SegmentFileName(si.Name, "", LUCENE40_SI_EXTENSION)
	si.AddFile(fileName)
	output, err := dir.CreateOutput(fileName, ctx)
	if err != nil {
		return err
	}
	ok := false
	defer func() {
		if ok {
			err = mergeError(err, output.Close())
		} else {
			// Suppress close errors so the original error wins, and
			// remove the partial file so a retry starts clean.
			util.CloseWhileSuppressingError(output)
			si.Dir.DeleteFile(fileName) // ignore error
		}
	}()
	if err = codec.WriteHeader(output, LUCENE40_CODEC_NAME, LUCENE40_VERSION_CURRENT); err != nil {
		return err
	}
	compound := byte((SEGMENT_INFO_NO + 256) % 256) // Go byte is non-negative, unlike Java
	if si.IsCompoundFile() {
		compound = SEGMENT_INFO_YES
	}
	// Write the Lucene version that created this segment, since 3.1
	if err = store.Stream(output).WriteString(si.Version().String()).
		WriteInt(int32(si.DocCount())).
		WriteByte(compound).
		WriteStringStringMap(si.Diagnostics()).
		WriteStringStringMap(map[string]string{}).
		WriteStringSet(si.Files()).Close(); err != nil {
		return err
	}
	ok = true
	return nil
}
示例6: writeSegmentsGen
/*
writeSegmentsGen is a utility for writing the SEGMENTS_GEN file to a Directory.
NOTE: this is an internal utility which is kept public so that it's
accessible by code from other packages. You should avoid calling this
method unless you're absolutely sure what you're doing!
*/
func writeSegmentsGen(dir store.Directory, generation int64) {
	write := func() (err error) {
		genOutput, err := dir.CreateOutput(INDEX_FILENAME_SEGMENTS_GEN, store.IO_CONTEXT_READONCE)
		if err != nil {
			return err
		}
		defer func() {
			// Capture close and sync failures as well.
			err = mergeError(err, genOutput.Close())
			err = mergeError(err, dir.Sync([]string{INDEX_FILENAME_SEGMENTS_GEN}))
		}()
		if err = genOutput.WriteInt(FORMAT_SEGMENTS_GEN_CURRENT); err != nil {
			return err
		}
		// The format stores the generation twice.
		if err = genOutput.WriteLong(generation); err != nil {
			return err
		}
		if err = genOutput.WriteLong(generation); err != nil {
			return err
		}
		return codec.WriteFooter(genOutput)
	}
	if err := write(); err != nil {
		// It's OK if we fail to write this file since it's used only as
		// one of the retry fallbacks.
		dir.DeleteFile(INDEX_FILENAME_SEGMENTS_GEN)
		// Ignore error; this file is only used in a retry fallback on init
	}
}
示例7: IsIndexExists
/*
IsIndexExists returns true if an index likely exists at the specified
directory. Note that it may report true for a corrupt index, or for an
index that is still in the process of committing.
*/
func IsIndexExists(directory store.Directory) (ok bool, err error) {
	// LUCENE-2812, LUCENE-2727, LUCENE-4738: this logic will
	// return true in cases that should arguably be false,
	// such as only IW.prepareCommit has been called, or a
	// corrupt first commit, but it's too deadly to make
	// this logic "smarter" and risk accidentally returning
	// false due to various cases like file description
	// exhaustion, access denied, etc., because in that
	// case IndexWriter may delete the entire index. It's
	// safer to err towards "index exists" than try to be
	// smart about detecting not-yet-fully-committed or
	// corrupt indices. This means that IndexWriter will
	// throw an exception on such indices and the app must
	// resolve the situation manually:
	files, err := directory.ListAll()
	if err != nil {
		if _, isNoDir := err.(*store.NoSuchDirectoryError); isNoDir {
			// Directory does not exist --> no index exists.
			return false, nil
		}
		return false, err
	}
	return IsIndexFileExists(files), nil
}
示例8: NewLucene41PostingsReader
// NewLucene41PostingsReader opens the segment's postings files (.doc,
// plus .pos and .pay when the field infos require them), verifies their
// headers, and returns a reader over them.
func NewLucene41PostingsReader(dir store.Directory,
	fis model.FieldInfos, si *model.SegmentInfo,
	ctx store.IOContext, segmentSuffix string) (r PostingsReaderBase, err error) {
	log.Print("Initializing Lucene41PostingsReader...")
	var docIn, posIn, payIn store.IndexInput
	ok := false
	defer func() {
		if !ok {
			log.Print("Failed to initialize Lucene41PostingsReader.")
			if err != nil {
				log.Print("DEBUG ", err)
			}
			// Close whichever inputs were opened before the failure.
			util.CloseWhileSuppressingError(docIn, posIn, payIn)
		}
	}()
	if docIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_DOC_EXTENSION), ctx); err != nil {
		return r, err
	}
	if _, err = codec.CheckHeader(docIn, LUCENE41_DOC_CODEC, LUCENE41_VERSION_CURRENT, LUCENE41_VERSION_CURRENT); err != nil {
		return r, err
	}
	forUtil, err := NewForUtil(docIn)
	if err != nil {
		return r, err
	}
	if fis.HasProx {
		// The segment indexes positions, so a .pos file must exist.
		if posIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_POS_EXTENSION), ctx); err != nil {
			return r, err
		}
		if _, err = codec.CheckHeader(posIn, LUCENE41_POS_CODEC, LUCENE41_VERSION_CURRENT, LUCENE41_VERSION_CURRENT); err != nil {
			return r, err
		}
		if fis.HasPayloads || fis.HasOffsets {
			// Payloads/offsets are stored in a separate .pay file.
			if payIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_PAY_EXTENSION), ctx); err != nil {
				return r, err
			}
			if _, err = codec.CheckHeader(payIn, LUCENE41_PAY_CODEC, LUCENE41_VERSION_CURRENT, LUCENE41_VERSION_CURRENT); err != nil {
				return r, err
			}
		}
	}
	ok = true
	return &Lucene41PostingsReader{docIn, posIn, payIn, forUtil}, nil
}
示例9: Read
/*
Read a particular segmentFileName. Note that this may return IO error
if a commit is in process.
*/
func (sis *SegmentInfos) Read(directory store.Directory, segmentFileName string) (err error) {
// fmt.Printf("Reading segment info from %v...\n", segmentFileName)
// Clear any previous segments:
sis.Clear()
sis.generation = GenerationFromSegmentsFileName(segmentFileName)
sis.lastGeneration = sis.generation
var input store.ChecksumIndexInput
if input, err = directory.OpenChecksumInput(segmentFileName, store.IO_CONTEXT_READ); err != nil {
return
}
var success = false
defer func() {
if !success {
// Clear any segment infos we had loaded so we
// have a clean slate on retry:
sis.Clear()
util.CloseWhileSuppressingError(input)
} else {
err = input.Close()
}
}()
var format int
if format, err = asInt(input.ReadInt()); err != nil {
return
}
var actualFormat int
if format == codec.CODEC_MAGIC {
// 4.0+
if actualFormat, err = asInt(codec.CheckHeaderNoMagic(input, "segments", VERSION_40, VERSION_49)); err != nil {
return
}
if sis.version, err = input.ReadLong(); err != nil {
return
}
if sis.counter, err = asInt(input.ReadInt()); err != nil {
return
}
var numSegments int
if numSegments, err = asInt(input.ReadInt()); err != nil {
return
} else if numSegments < 0 {
return errors.New(fmt.Sprintf("invalid segment count: %v (resource: %v)", numSegments, input))
}
var segName, codecName string
var fCodec Codec
var delGen, fieldInfosGen, dvGen int64
var delCount int
for seg := 0; seg < numSegments; seg++ {
if segName, err = input.ReadString(); err != nil {
return
}
if codecName, err = input.ReadString(); err != nil {
return
}
fCodec = LoadCodec(codecName)
assert2(fCodec != nil, "Invalid codec name: %v", codecName)
// fmt.Printf("SIS.read seg=%v codec=%v\n", seg, fCodec)
var info *SegmentInfo
if info, err = fCodec.SegmentInfoFormat().SegmentInfoReader().Read(directory, segName, store.IO_CONTEXT_READ); err != nil {
return
}
info.SetCodec(fCodec)
if delGen, err = input.ReadLong(); err != nil {
return
}
if delCount, err = asInt(input.ReadInt()); err != nil {
return
} else if delCount < 0 || delCount > info.DocCount() {
return errors.New(fmt.Sprintf(
"invalid deletion count: %v vs docCount=%v (resource: %v)",
delCount, info.DocCount(), input))
}
fieldInfosGen = -1
if actualFormat >= VERSION_46 {
if fieldInfosGen, err = input.ReadLong(); err != nil {
return
}
}
dvGen = -1
if actualFormat >= VERSION_49 {
if dvGen, err = input.ReadLong(); err != nil {
return
}
} else {
dvGen = fieldInfosGen
}
siPerCommit := NewSegmentCommitInfo(info, delCount, delGen, fieldInfosGen, dvGen)
if actualFormat >= VERSION_46 {
if actualFormat < VERSION_49 {
panic("not implemented yet")
//.........这里部分代码省略.........
示例10: NewIndexWriter
/*
Constructs a new IndexWriter per the settings given in conf. If you want to
make "live" changes to this writer instance, use Config().
NOTE: after this writer is created, the given configuration instance cannot be
passed to another writer. If you intend to do so, you should clone it
beforehand.
*/
func NewIndexWriter(d store.Directory, conf *IndexWriterConfig) (w *IndexWriter, err error) {
ans := &IndexWriter{
Locker: &sync.Mutex{},
ClosingControl: newClosingControl(),
segmentsToMerge: make(map[*SegmentInfoPerCommit]bool),
mergeExceptions: make([]*OneMerge, 0),
doAfterFlush: func() error { return nil },
doBeforeFlush: func() error { return nil },
commitLock: &sync.Mutex{},
fullFlushLock: &sync.Mutex{},
config: newLiveIndexWriterConfigFrom(conf),
directory: d,
analyzer: conf.analyzer,
infoStream: conf.infoStream,
mergePolicy: conf.mergePolicy,
mergeScheduler: conf.mergeScheduler,
codec: conf.codec,
bufferedDeletesStream: newBufferedDeletesStream(conf.infoStream),
poolReaders: conf.readerPooling,
writeLock: d.MakeLock(WRITE_LOCK_NAME),
}
ans.readerPool = newReaderPool(ans)
ans.MergeControl = newMergeControl(conf.infoStream, ans.readerPool)
conf.setIndexWriter(ans)
ans.mergePolicy.SetIndexWriter(ans)
// obtain write lock
if ok, err := ans.writeLock.ObtainWithin(conf.writeLockTimeout); !ok || err != nil {
if err != nil {
return nil, err
}
return nil, errors.New(fmt.Sprintf("Index locked for write: %v", ans.writeLock))
}
var success bool = false
defer func() {
if !success {
if ans.infoStream.IsEnabled("IW") {
ans.infoStream.Message("IW", "init: hit exception on init; releasing write lock")
}
ans.writeLock.Release() // don't mask the original exception
ans.writeLock = nil
}
}()
var create bool
switch conf.openMode {
case OPEN_MODE_CREATE:
create = true
case OPEN_MODE_APPEND:
create = false
default:
// CREATE_OR_APPEND - create only if an index does not exist
ok, err := IsIndexExists(d)
if err != nil {
return nil, err
}
create = !ok
}
// If index is too old, reading the segments will return
// IndexFormatTooOldError
ans.segmentInfos = &SegmentInfos{}
var initialIndexExists bool = true
if create {
// Try to read first. This is to allow create against an index
// that's currently open for searching. In this case we write the
// next segments_N file with no segments:
err = ans.segmentInfos.ReadAll(d)
if err == nil {
ans.segmentInfos.Clear()
} else {
// Likely this means it's a fresh directory
initialIndexExists = false
err = nil
}
// Record that we have a change (zero out all segments) pending:
ans.changed()
} else {
err = ans.segmentInfos.ReadAll(d)
if err != nil {
return
}
//.........这里部分代码省略.........
示例11: Read
// Read reads the Lucene 4.0 .si file for the named segment from dir and
// reconstructs its SegmentInfo.
//
// On any error (including a failure to close the input) the returned
// *SegmentInfo is nil.
func (r *Lucene40SegmentInfoReader) Read(dir store.Directory,
	segment string, context store.IOContext) (si *SegmentInfo, err error) {
	fileName := util.SegmentFileName(segment, "", LUCENE40_SI_EXTENSION)
	input, err := dir.OpenInput(fileName, context)
	if err != nil {
		return nil, err
	}
	success := false
	defer func() {
		if !success {
			// Suppress secondary close errors so the original error wins.
			util.CloseWhileSuppressingError(input)
		} else if err2 := input.Close(); err2 != nil {
			// Fix: the close error used to be silently dropped.
			si, err = nil, err2
		}
	}()
	if _, err = codec.CheckHeader(input, LUCENE40_CODEC_NAME, LUCENE40_VERSION_START, LUCENE40_VERSION_CURRENT); err != nil {
		return nil, err
	}
	versionStr, err := input.ReadString()
	if err != nil {
		return nil, err
	}
	version, err := util.ParseVersion(versionStr)
	if err != nil {
		return nil, err
	}
	docCount, err := input.ReadInt()
	if err != nil {
		return nil, err
	}
	if docCount < 0 {
		return nil, fmt.Errorf("invalid docCount: %v (resource=%v)", docCount, input)
	}
	sicf, err := input.ReadByte()
	if err != nil {
		return nil, err
	}
	isCompoundFile := (sicf == SEGMENT_INFO_YES)
	diagnostics, err := input.ReadStringStringMap()
	if err != nil {
		return nil, err
	}
	// Read and discard the deprecated attributes map.
	if _, err = input.ReadStringStringMap(); err != nil {
		return nil, err
	}
	files, err := input.ReadStringSet()
	if err != nil {
		return nil, err
	}
	if err = codec.CheckEOF(input); err != nil {
		return nil, err
	}
	si = NewSegmentInfo(dir, version, segment, int(docCount), isCompoundFile, nil, diagnostics)
	si.SetFiles(files)
	success = true
	return si, nil
}
示例12: newCompressingStoredFieldsReader
// Sole constructor
func newCompressingStoredFieldsReader(d store.Directory,
si *model.SegmentInfo, segmentSuffix string,
fn model.FieldInfos, ctx store.IOContext, formatName string,
compressionMode CompressionMode) (r *CompressingStoredFieldsReader, err error) {
r = &CompressingStoredFieldsReader{}
r.compressionMode = compressionMode
segment := si.Name
r.fieldInfos = fn
r.numDocs = si.DocCount()
var indexStream store.ChecksumIndexInput
success := false
defer func() {
if !success {
util.CloseWhileSuppressingError(r, indexStream)
}
}()
indexStreamFN := util.SegmentFileName(segment, segmentSuffix, lucene40.FIELDS_INDEX_EXTENSION)
fieldsStreamFN := util.SegmentFileName(segment, segmentSuffix, lucene40.FIELDS_EXTENSION)
// Load the index into memory
if indexStream, err = d.OpenChecksumInput(indexStreamFN, ctx); err != nil {
return nil, err
}
codecNameIdx := formatName + CODEC_SFX_IDX
if r.version, err = int32AsInt(codec.CheckHeader(indexStream, codecNameIdx,
VERSION_START, VERSION_CURRENT)); err != nil {
return nil, err
}
assert(int64(codec.HeaderLength(codecNameIdx)) == indexStream.FilePointer())
if r.indexReader, err = newCompressingStoredFieldsIndexReader(indexStream, si); err != nil {
return nil, err
}
var maxPointer int64 = -1
if r.version >= VERSION_CHECKSUM {
if maxPointer, err = indexStream.ReadVLong(); err != nil {
return nil, err
}
if _, err = codec.CheckFooter(indexStream); err != nil {
return nil, err
}
} else {
if err = codec.CheckEOF(indexStream); err != nil {
return nil, err
}
}
if err = indexStream.Close(); err != nil {
return nil, err
}
indexStream = nil
// Open the data file and read metadata
if r.fieldsStream, err = d.OpenInput(fieldsStreamFN, ctx); err != nil {
return nil, err
}
if r.version >= VERSION_CHECKSUM {
if maxPointer+codec.FOOTER_LENGTH != r.fieldsStream.Length() {
return nil, errors.New(fmt.Sprintf(
"Invalid fieldsStream maxPointer (file truncated?): maxPointer=%v, length=%v",
maxPointer, r.fieldsStream.Length()))
}
} else {
maxPointer = r.fieldsStream.Length()
}
r.maxPointer = maxPointer
codecNameDat := formatName + CODEC_SFX_DAT
var fieldsVersion int
if fieldsVersion, err = int32AsInt(codec.CheckHeader(r.fieldsStream,
codecNameDat, VERSION_START, VERSION_CURRENT)); err != nil {
return nil, err
}
assert2(r.version == fieldsVersion,
"Version mismatch between stored fields index and data: %v != %v",
r.version, fieldsVersion)
assert(int64(codec.HeaderLength(codecNameDat)) == r.fieldsStream.FilePointer())
r.chunkSize = -1
if r.version >= VERSION_BIG_CHUNKS {
if r.chunkSize, err = int32AsInt(r.fieldsStream.ReadVInt()); err != nil {
return nil, err
}
}
if r.packedIntsVersion, err = int32AsInt(r.fieldsStream.ReadVInt()); err != nil {
return nil, err
}
r.decompressor = compressionMode.NewDecompressor()
r.bytes = make([]byte, 0)
if r.version >= VERSION_CHECKSUM {
// NOTE: data file is too costly to verify checksum against all the
// bytes on open, but fo rnow we at least verify proper structure
// of the checksum footer: which looks for FOOTER_MATIC +
// algorithmID. This is cheap and can detect some forms of
// corruption such as file trucation.
//.........这里部分代码省略.........
示例13: NewLucene41PostingsReader
// NewLucene41PostingsReader opens the segment's postings files (.doc,
// plus .pos and .pay when the field infos require them), verifies their
// headers and — for newer versions — their checksum footers, and returns
// a reader over them.
func NewLucene41PostingsReader(dir store.Directory,
	fis FieldInfos, si *SegmentInfo,
	ctx store.IOContext, segmentSuffix string) (r PostingsReaderBase, err error) {
	// fmt.Println("Initializing Lucene41PostingsReader...")
	success := false
	var docIn, posIn, payIn store.IndexInput = nil, nil, nil
	defer func() {
		if !success {
			fmt.Println("Failed to initialize Lucene41PostingsReader.")
			// Close whichever inputs were opened before the failure.
			util.CloseWhileSuppressingError(docIn, posIn, payIn)
		}
	}()
	docIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_DOC_EXTENSION), ctx)
	if err != nil {
		return nil, err
	}
	var version int32
	version, err = codec.CheckHeader(docIn, LUCENE41_DOC_CODEC, LUCENE41_VERSION_START, LUCENE41_VERSION_CURRENT)
	if err != nil {
		return nil, err
	}
	forUtil, err := NewForUtilFrom(docIn)
	if err != nil {
		return nil, err
	}
	if version >= LUCENE41_VERSION_CHECKSUM {
		// NOTE: data file is too costly to verify checksum against all the
		// bytes on open, but for now we at least verify proper structure
		// of the checksum footer: which looks for FOOTER_MAGIC +
		// algorithmID. This is cheap and can detect some forms of
		// corruption such as file truncation.
		if _, err = codec.RetrieveChecksum(docIn); err != nil {
			return nil, err
		}
	}
	if fis.HasProx {
		// The segment indexes positions, so a .pos file must exist.
		posIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_POS_EXTENSION), ctx)
		if err != nil {
			return nil, err
		}
		// The .pos header version must match the .doc version exactly.
		_, err = codec.CheckHeader(posIn, LUCENE41_POS_CODEC, version, version)
		if err != nil {
			return nil, err
		}
		if version >= LUCENE41_VERSION_CHECKSUM {
			// NOTE: data file is too costly to verify checksum against all the
			// bytes on open, but for now we at least verify proper structure
			// of the checksum footer: which looks for FOOTER_MAGIC +
			// algorithmID. This is cheap and can detect some forms of
			// corruption such as file truncation.
			if _, err = codec.RetrieveChecksum(posIn); err != nil {
				return nil, err
			}
		}
		if fis.HasPayloads || fis.HasOffsets {
			// Payloads/offsets are stored in a separate .pay file.
			payIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_PAY_EXTENSION), ctx)
			if err != nil {
				return nil, err
			}
			_, err = codec.CheckHeader(payIn, LUCENE41_PAY_CODEC, version, version)
			if err != nil {
				return nil, err
			}
			if version >= LUCENE41_VERSION_CHECKSUM {
				// NOTE: data file is too costly to verify checksum against all the
				// bytes on open, but for now we at least verify proper structure
				// of the checksum footer: which looks for FOOTER_MAGIC +
				// algorithmID. This is cheap and can detect some forms of
				// corruption such as file truncation.
				if _, err = codec.RetrieveChecksum(payIn); err != nil {
					return nil, err
				}
			}
		}
	}
	success = true
	return &Lucene41PostingsReader{docIn, posIn, payIn, forUtil, int(version)}, nil
}
示例14: newBlockTreeTermsReader
func newBlockTreeTermsReader(dir store.Directory,
fieldInfos model.FieldInfos, info *model.SegmentInfo,
postingsReader PostingsReaderBase, ctx store.IOContext,
segmentSuffix string, indexDivisor int) (p FieldsProducer, err error) {
log.Print("Initializing BlockTreeTermsReader...")
fp := &BlockTreeTermsReader{
postingsReader: postingsReader,
fields: make(map[string]FieldReader),
segment: info.Name,
}
fp.in, err = dir.OpenInput(util.SegmentFileName(info.Name, segmentSuffix, BTT_EXTENSION), ctx)
if err != nil {
return fp, err
}
success := false
var indexIn store.IndexInput
defer func() {
if !success {
log.Print("Failed to initialize BlockTreeTermsReader.")
if err != nil {
log.Print("DEBUG ", err)
}
// this.close() will close in:
util.CloseWhileSuppressingError(indexIn, fp)
}
}()
fp.version, err = fp.readHeader(fp.in)
if err != nil {
return fp, err
}
log.Printf("Version: %v", fp.version)
if indexDivisor != -1 {
indexIn, err = dir.OpenInput(util.SegmentFileName(info.Name, segmentSuffix, BTT_INDEX_EXTENSION), ctx)
if err != nil {
return fp, err
}
indexVersion, err := fp.readIndexHeader(indexIn)
if err != nil {
return fp, err
}
log.Printf("Index version: %v", indexVersion)
if int(indexVersion) != fp.version {
return fp, errors.New(fmt.Sprintf("mixmatched version files: %v=%v,%v=%v", fp.in, fp.version, indexIn, indexVersion))
}
}
// Have PostingsReader init itself
postingsReader.Init(fp.in)
// Read per-field details
fp.seekDir(fp.in, fp.dirOffset)
if indexDivisor != -1 {
fp.seekDir(indexIn, fp.indexDirOffset)
}
numFields, err := fp.in.ReadVInt()
if err != nil {
return fp, err
}
log.Printf("Fields number: %v", numFields)
if numFields < 0 {
return fp, errors.New(fmt.Sprintf("invalid numFields: %v (resource=%v)", numFields, fp.in))
}
for i := int32(0); i < numFields; i++ {
log.Printf("Next field...")
field, err := fp.in.ReadVInt()
if err != nil {
return fp, err
}
log.Printf("Field: %v", field)
numTerms, err := fp.in.ReadVLong()
if err != nil {
return fp, err
}
// assert numTerms >= 0
log.Printf("Terms number: %v", numTerms)
numBytes, err := fp.in.ReadVInt()
if err != nil {
return fp, err
}
log.Printf("Bytes number: %v", numBytes)
rootCode := make([]byte, numBytes)
err = fp.in.ReadBytes(rootCode)
if err != nil {
return fp, err
}
fieldInfo := fieldInfos.FieldInfoByNumber(int(field))
// assert fieldInfo != nil
var sumTotalTermFreq int64
if fieldInfo.IndexOptions() == model.INDEX_OPT_DOCS_ONLY {
sumTotalTermFreq = -1
//.........这里部分代码省略.........
示例15: NewCompressingStoredFieldsWriter
// NewCompressingStoredFieldsWriter creates the fields index and fields
// data outputs for the given segment, writes their codec headers, and
// returns a writer ready to buffer documents.
func NewCompressingStoredFieldsWriter(dir store.Directory, si *model.SegmentInfo,
	segmentSuffix string, ctx store.IOContext, formatName string,
	compressionMode CompressionMode, chunkSize int) (*CompressingStoredFieldsWriter, error) {
	assert(dir != nil)
	w := &CompressingStoredFieldsWriter{
		directory:       dir,
		segment:         si.Name,
		segmentSuffix:   segmentSuffix,
		compressionMode: compressionMode,
		compressor:      compressionMode.NewCompressor(),
		chunkSize:       chunkSize,
		docBase:         0,
		bufferedDocs:    newGrowableByteArrayDataOutput(chunkSize),
		numStoredFields: make([]int, 16),
		endOffsets:      make([]int, 16),
		numBufferedDocs: 0,
	}
	ok := false
	indexStream, err := dir.CreateOutput(util.SegmentFileName(si.Name, segmentSuffix,
		lucene40.FIELDS_INDEX_EXTENSION), ctx)
	if err != nil {
		return nil, err
	}
	assert(indexStream != nil)
	defer func() {
		if !ok {
			// Clean up the partially created outputs on failure.
			util.CloseWhileSuppressingError(indexStream)
			w.Abort()
		}
	}()
	if w.fieldsStream, err = dir.CreateOutput(util.SegmentFileName(si.Name, segmentSuffix,
		lucene40.FIELDS_EXTENSION), ctx); err != nil {
		return nil, err
	}
	codecNameIdx := formatName + CODEC_SFX_IDX
	codecNameDat := formatName + CODEC_SFX_DAT
	if err = codec.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT); err != nil {
		return nil, err
	}
	if err = codec.WriteHeader(w.fieldsStream, codecNameDat, VERSION_CURRENT); err != nil {
		return nil, err
	}
	assert(int64(codec.HeaderLength(codecNameIdx)) == indexStream.FilePointer())
	assert(int64(codec.HeaderLength(codecNameDat)) == w.fieldsStream.FilePointer())
	if w.indexWriter, err = NewStoredFieldsIndexWriter(indexStream); err != nil {
		return nil, err
	}
	assert(w.indexWriter != nil)
	// Ownership of indexStream has passed to the index writer.
	indexStream = nil
	if err = w.fieldsStream.WriteVInt(int32(chunkSize)); err != nil {
		return nil, err
	}
	if err = w.fieldsStream.WriteVInt(packed.VERSION_CURRENT); err != nil {
		return nil, err
	}
	ok = true
	return w, nil
}