本文整理匯總了Golang中github.com/balzaczyy/golucene/core/util.SegmentFileName函數的典型用法代碼示例。如果您正苦於以下問題:Golang SegmentFileName函數的具體用法?Golang SegmentFileName怎麽用?Golang SegmentFileName使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了SegmentFileName函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: newLucene49NormsConsumer
// newLucene49NormsConsumer opens the norms data and metadata outputs for the
// segment described by state and stamps their codec headers. If anything
// fails, the partially-constructed consumer is closed with errors suppressed.
func newLucene49NormsConsumer(state *SegmentWriteState,
	dataCodec, dataExtension, metaCodec, metaExtension string) (nc *NormsConsumer, err error) {
	// Sanity-check the packed formats this consumer relies on.
	assert(packed.PackedFormat(packed.PACKED_SINGLE_BLOCK).IsSupported(1))
	assert(packed.PackedFormat(packed.PACKED_SINGLE_BLOCK).IsSupported(2))
	assert(packed.PackedFormat(packed.PACKED_SINGLE_BLOCK).IsSupported(4))

	nc = &NormsConsumer{maxDoc: state.SegmentInfo.DocCount()}
	ok := false
	defer func() {
		if !ok {
			util.CloseWhileSuppressingError(nc)
		}
	}()

	// Create the data output and write its header.
	dataName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, dataExtension)
	nc.data, err = state.Directory.CreateOutput(dataName, state.Context)
	if err != nil {
		return nil, err
	}
	err = codec.WriteHeader(nc.data, dataCodec, VERSION_CURRENT)
	if err != nil {
		return nil, err
	}

	// Create the metadata output and write its header.
	metaName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, metaExtension)
	nc.meta, err = state.Directory.CreateOutput(metaName, state.Context)
	if err != nil {
		return nil, err
	}
	err = codec.WriteHeader(nc.meta, metaCodec, VERSION_CURRENT)
	if err != nil {
		return nil, err
	}

	ok = true
	return nc, nil
}
示例2: Abort
// Abort discards this writer's partial output: the writer is closed with
// errors suppressed and both stored-fields files are deleted best-effort.
func (w *CompressingStoredFieldsWriter) Abort() {
	assert(w != nil)
	util.CloseWhileSuppressingError(w)
	fieldsFile := util.SegmentFileName(w.segment, w.segmentSuffix, lucene40.FIELDS_EXTENSION)
	indexFile := util.SegmentFileName(w.segment, w.segmentSuffix, lucene40.FIELDS_INDEX_EXTENSION)
	util.DeleteFilesIgnoringErrors(w.directory, fieldsFile, indexFile)
}
示例3: Abort
// Abort discards this writer's partial output. A nil receiver is tolerated
// (early released pointer) and treated as a no-op; otherwise the writer is
// closed with errors suppressed and both stored-fields files are deleted.
func (w *CompressingStoredFieldsWriter) Abort() {
	if w == nil { // tolerate early released pointer
		return
	}
	util.CloseWhileSuppressingError(w)
	fieldsFile := util.SegmentFileName(w.segment, w.segmentSuffix, lucene40.FIELDS_EXTENSION)
	indexFile := util.SegmentFileName(w.segment, w.segmentSuffix, lucene40.FIELDS_INDEX_EXTENSION)
	util.DeleteFilesIgnoringErrors(w.directory, fieldsFile, indexFile)
}
示例4: NewLucene41PostingsReader
// NewLucene41PostingsReader opens the .doc input (always) plus the .pos and
// .pay inputs (only when the field infos indicate proximity / payload-or-
// offset data), verifying each file's codec header. On failure every input
// opened so far is closed with errors suppressed.
func NewLucene41PostingsReader(dir store.Directory,
	fis model.FieldInfos, si *model.SegmentInfo,
	ctx store.IOContext, segmentSuffix string) (r PostingsReaderBase, err error) {
	log.Print("Initializing Lucene41PostingsReader...")
	var docIn, posIn, payIn store.IndexInput
	ok := false
	defer func() {
		if !ok {
			log.Print("Failed to initialize Lucene41PostingsReader.")
			if err != nil {
				log.Print("DEBUG ", err)
			}
			util.CloseWhileSuppressingError(docIn, posIn, payIn)
		}
	}()

	if docIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_DOC_EXTENSION), ctx); err != nil {
		return r, err
	}
	if _, err = codec.CheckHeader(docIn, LUCENE41_DOC_CODEC, LUCENE41_VERSION_CURRENT, LUCENE41_VERSION_CURRENT); err != nil {
		return r, err
	}
	forUtil, err := NewForUtil(docIn)
	if err != nil {
		return r, err
	}

	if fis.HasProx {
		if posIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_POS_EXTENSION), ctx); err != nil {
			return r, err
		}
		if _, err = codec.CheckHeader(posIn, LUCENE41_POS_CODEC, LUCENE41_VERSION_CURRENT, LUCENE41_VERSION_CURRENT); err != nil {
			return r, err
		}
		// Payload/offset data lives in its own file and only exists when
		// at least one field stores payloads or offsets.
		if fis.HasPayloads || fis.HasOffsets {
			if payIn, err = dir.OpenInput(util.SegmentFileName(si.Name, segmentSuffix, LUCENE41_PAY_EXTENSION), ctx); err != nil {
				return r, err
			}
			if _, err = codec.CheckHeader(payIn, LUCENE41_PAY_CODEC, LUCENE41_VERSION_CURRENT, LUCENE41_VERSION_CURRENT); err != nil {
				return r, err
			}
		}
	}

	ok = true
	return &Lucene41PostingsReader{docIn, posIn, payIn, forUtil}, nil
}
示例5: newLucene42DocValuesProducer
// newLucene42DocValuesProducer reads the doc-values metadata file for the
// segment described by state, then opens the matching data file and checks
// that both files carry the same format version.
//
// Fixes over the previous version: the single success flag with one deferred
// cleanup conflated two distinct phases — the `success = true; success = false`
// pair made the first assignment dead code, close errors of the metadata
// input were silently suppressed even on the fully successful path, and the
// data input was leaked when its header check failed or the versions
// mismatched. The two phases now each have their own cleanup.
func newLucene42DocValuesProducer(state SegmentReadState,
	dataCodec, dataExtension, metaCodec, metaExtension string) (dvp *Lucene42DocValuesProducer, err error) {
	dvp = &Lucene42DocValuesProducer{
		numericInstances: make(map[int]NumericDocValues),
	}
	dvp.maxDoc = state.segmentInfo.DocCount()

	// Phase 1: read the entries from the metadata file, then close it.
	metaName := util.SegmentFileName(state.segmentInfo.Name, state.segmentSuffix, metaExtension)
	in, err := state.dir.OpenInput(metaName, state.context)
	if err != nil {
		return dvp, err
	}
	version, err := codec.CheckHeader(in, metaCodec, LUCENE42_DV_VERSION_START, LUCENE42_DV_VERSION_CURRENT)
	if err == nil {
		dvp.numerics = make(map[int]NumericEntry)
		dvp.binaries = make(map[int]BinaryEntry)
		dvp.fsts = make(map[int]FSTEntry)
		err = dvp.readFields(in)
	}
	if err != nil {
		// Keep the original error; suppress any error from closing.
		util.CloseWhileSuppressingError(in)
		return dvp, err
	}
	// On success a close failure is a real error and must be reported.
	if err = util.Close(in); err != nil {
		return dvp, err
	}

	// Phase 2: open the data file and verify its header matches the metadata.
	dataName := util.SegmentFileName(state.segmentInfo.Name, state.segmentSuffix, dataExtension)
	dvp.data, err = state.dir.OpenInput(dataName, state.context)
	if err != nil {
		return dvp, err
	}
	success := false
	defer func() {
		if !success {
			// Previously leaked on header/version failures.
			util.CloseWhileSuppressingError(dvp.data)
		}
	}()
	version2, err := codec.CheckHeader(dvp.data, dataCodec, LUCENE42_DV_VERSION_START, LUCENE42_DV_VERSION_CURRENT)
	if err != nil {
		return dvp, err
	}
	if version != version2 {
		return dvp, errors.New("Format versions mismatch")
	}
	success = true
	return dvp, nil
}
示例6: ReadFieldInfos
/* Reads the most recent FieldInfos of the given segment info. */
func ReadFieldInfos(info *SegmentCommitInfo) (fis FieldInfos, err error) {
	var dir store.Directory
	var closeDir bool
	if info.FieldInfosGen() == -1 && info.Info.IsCompoundFile() {
		// no fieldInfos gen and segment uses a compound file
		if dir, err = store.NewCompoundFileDirectory(info.Info.Dir,
			util.SegmentFileName(info.Info.Name, "", store.COMPOUND_FILE_EXTENSION),
			store.IO_CONTEXT_READONCE, false); err != nil {
			return
		}
		closeDir = true
	} else {
		// gen'd FIS are read outside CFS, or the segment doesn't use a compound file
		dir = info.Info.Dir
		closeDir = false
	}
	// NOTE: err is a named result, so this deferred merge also folds any
	// close error into the error returned by the final FieldInfosReader
	// call below. The CFS directory is only closed when we opened it here.
	defer func() {
		if closeDir {
			err = mergeError(err, dir.Close())
		}
	}()
	// A generation of -1 means no updated field infos exist; otherwise the
	// generation is encoded base-36 into the segment suffix so the reader
	// picks up the most recent files.
	var segmentSuffix string
	if n := info.FieldInfosGen(); n != -1 {
		segmentSuffix = strconv.FormatInt(n, 36)
	}
	codec := info.Info.Codec().(Codec)
	fisFormat := codec.FieldInfosFormat()
	return fisFormat.FieldInfosReader()(dir, info.Info.Name, segmentSuffix, store.IO_CONTEXT_READONCE)
}
示例7: newCompoundFileWriter
/*
Create the compound stream in the specified file. The filename is the
entire name (no extensions are added).
*/
func newCompoundFileWriter(dir Directory, name string) *CompoundFileWriter {
	assert2(dir != nil, "directory cannot be nil")
	assert2(name != "", "name cannot be empty")
	// The entry table shares the data file's base name but carries the
	// dedicated entries extension.
	entryName := util.SegmentFileName(
		util.StripExtension(name),
		"",
		COMPOUND_FILE_ENTRIES_EXTENSION,
	)
	w := &CompoundFileWriter{
		Locker:         &sync.Mutex{},
		directory:      dir,
		entries:        make(map[string]*FileEntry),
		seenIDs:        make(map[string]bool),
		pendingEntries: list.New(),
		outputTaken:    NewAtomicBool(),
		entryTableName: entryName,
		dataFileName:   name,
	}
	return w
}
示例8: TestReadingFromSlicedIndexInputOSX
// TestReadingFromSlicedIndexInputOSX opens a position file stored inside a
// compound file (.cfs) produced on OS X and verifies its codec header can be
// read through the sliced index input.
//
// Fixes over the previous version: the error from NewCompoundFileDirectory
// was silently dropped, failed setup steps used t.Error and then continued
// (dereferencing invalid values), and the CheckHeader result was ignored.
func TestReadingFromSlicedIndexInputOSX(t *testing.T) {
	t.Logf("TestReadingFromSlicedIndexInputOSX...")
	path := "../search/testdata/osx/belfrysample"
	d, err := OpenFSDirectory(path)
	if err != nil {
		t.Fatal(err) // cannot continue without a directory
	}
	ctx := NewIOContextBool(false)
	cd, err := NewCompoundFileDirectory(d, "_0.cfs", ctx, false)
	if err != nil {
		t.Fatal(err) // was silently ignored before, masking open failures
	}
	name := util.SegmentFileName("_0", "Lucene41_0", "pos")
	posIn, err := cd.OpenInput(name, ctx)
	if err != nil {
		t.Fatal(err) // posIn is unusable past this point
	}
	t.Log(posIn)
	if _, err = codec.CheckHeader(posIn, "Lucene41PostingsWriterPos", 0, 0); err != nil {
		t.Error(err)
	}
	// codec header mismatch: actual header=0 vs expected header=1071082519 (resource: SlicedIndexInput(SlicedIndexInput(_0_Lucene41_0.pos in SimpleFSIndexInput(path='/private/tmp/kc/index/belfrysample/_0.cfs')) in SimpleFSIndexInput(path='/private/tmp/kc/index/belfrysample/_0.cfs') slice=1461:3426))
}
示例9: Write
// Write persists the given SegmentInfo in the Lucene 4.0 .si format: codec
// header, creating Lucene version, doc count, compound-file flag,
// diagnostics, an (always empty) attributes map, and the file set. On
// failure the partial output is closed with errors suppressed and the file
// is deleted best-effort.
//
// Fix over the previous version: the output was closed twice on the success
// path — once by the .Close() terminating the Stream chain and again in the
// deferred cleanup. The defer now only handles the failure path.
func (w *Lucene40SegmentInfoWriter) Write(dir store.Directory,
	si *SegmentInfo, fis FieldInfos, ctx store.IOContext) (err error) {
	filename := util.SegmentFileName(si.Name, "", LUCENE40_SI_EXTENSION)
	si.AddFile(filename)

	var output store.IndexOutput
	output, err = dir.CreateOutput(filename, ctx)
	if err != nil {
		return err
	}
	var success = false
	defer func() {
		if !success {
			// Best-effort cleanup of the partially-written file.
			util.CloseWhileSuppressingError(output)
			si.Dir.DeleteFile(filename) // ignore error
		}
		// On success the output has already been closed by the Stream
		// chain below; closing it again here would be a double close.
	}()

	err = codec.WriteHeader(output, LUCENE40_CODEC_NAME, LUCENE40_VERSION_CURRENT)
	if err != nil {
		return err
	}
	// Write the Lucene version that created this segment, since 3.1
	err = store.Stream(output).WriteString(si.Version().String()).
		WriteInt(int32(si.DocCount())).
		WriteByte(func() byte {
			if si.IsCompoundFile() {
				return SEGMENT_INFO_YES
			}
			return byte((SEGMENT_INFO_NO + 256) % 256) // Go byte is non-negative, unlike Java
		}()).WriteStringStringMap(si.Diagnostics()).
		WriteStringStringMap(map[string]string{}).
		WriteStringSet(si.Files()).Close()
	if err != nil {
		return err
	}
	success = true
	return nil
}
示例10: newLucene49NormsProducer
// newLucene49NormsProducer opens the norms metadata and data files for the
// segment described by state, reads and verifies the metadata entries
// (header, fields, checksum footer), and checks that both files agree on the
// format version. On any failure every input opened so far is closed with
// errors suppressed.
func newLucene49NormsProducer(state SegmentReadState,
	dataCodec, dataExtension, metaCodec, metaExtension string) (np *NormsProducer, err error) {
	np = &NormsProducer{
		Locker:       new(sync.Mutex),
		norms:        make(map[int]*NormsEntry),
		instances:    make(map[int]NumericDocValues),
		maxDoc:       state.SegmentInfo.DocCount(),
		ramBytesUsed: util.ShallowSizeOfInstance(reflect.TypeOf(np)),
	}
	metaName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, metaExtension)
	// read in the entries from the metadata file.
	var in store.ChecksumIndexInput
	if in, err = state.Dir.OpenChecksumInput(metaName, state.Context); err != nil {
		return nil, err
	}
	// The closure scopes the success/defer pair to the metadata phase: on
	// success the meta input is closed normally (close errors propagate via
	// the named err), otherwise it is closed with errors suppressed.
	if err = func() error {
		var success = false
		defer func() {
			if success {
				err = util.Close(in)
			} else {
				util.CloseWhileSuppressingError(in)
			}
		}()
		if np.version, err = codec.CheckHeader(in, metaCodec, VERSION_START, VERSION_CURRENT); err != nil {
			return err
		}
		if err = np.readFields(in, state.FieldInfos); err != nil {
			return err
		}
		if _, err = codec.CheckFooter(in); err != nil {
			return err
		}
		success = true
		return nil
	}(); err != nil {
		return nil, err
	}
	// Second phase: open the data file; the flag/defer pair below only
	// covers cleanup of np.data.
	dataName := util.SegmentFileName(state.SegmentInfo.Name, state.SegmentSuffix, dataExtension)
	if np.data, err = state.Dir.OpenInput(dataName, state.Context); err != nil {
		return nil, err
	}
	var success = false
	defer func() {
		if !success {
			util.CloseWhileSuppressingError(np.data)
		}
	}()
	var version2 int32
	if version2, err = codec.CheckHeader(np.data, dataCodec, VERSION_START, VERSION_CURRENT); err != nil {
		return nil, err
	}
	if version2 != np.version {
		return nil, errors.New("Format versions mismatch")
	}
	// NOTE: data file is too costly to verify checksum against all the
	// bytes on open, but for now we at least verify proper structure
	// of the checksum footer: which looks for FOOTER_MAGIC +
	// algorithmID. This is cheap and can detect some forms of
	// corruption such as file truncation.
	if _, err = codec.RetrieveChecksum(np.data); err != nil {
		return nil, err
	}
	success = true
	return np, nil
}
示例11: newSegmentCoreReaders
func newSegmentCoreReaders(owner *SegmentReader, dir store.Directory, si *SegmentInfoPerCommit,
context store.IOContext, termsIndexDivisor int) (self SegmentCoreReaders, err error) {
if termsIndexDivisor == 0 {
panic("indexDivisor must be < 0 (don't load terms index) or greater than 0 (got 0)")
}
log.Printf("Initializing SegmentCoreReaders from directory: %v", dir)
self = SegmentCoreReaders{
refCount: 1,
normsLocal: func() map[string]interface{} {
return make(map[string]interface{})
},
}
self.fieldsReaderLocal = func() StoredFieldsReader {
return self.fieldsReaderOrig.Clone()
}
log.Print("Initializing listeners...")
self.addListener = make(chan CoreClosedListener)
self.removeListener = make(chan CoreClosedListener)
self.notifyListener = make(chan *SegmentReader)
// TODO re-enable later
go func() { // ensure listners are synchronized
coreClosedListeners := make([]CoreClosedListener, 0)
isRunning := true
var listener CoreClosedListener
for isRunning {
log.Print("Listening for events...")
select {
case listener = <-self.addListener:
coreClosedListeners = append(coreClosedListeners, listener)
case listener = <-self.removeListener:
n := len(coreClosedListeners)
for i, v := range coreClosedListeners {
if v == listener {
newListeners := make([]CoreClosedListener, 0, n-1)
newListeners = append(newListeners, coreClosedListeners[0:i]...)
newListeners = append(newListeners, coreClosedListeners[i+1:]...)
coreClosedListeners = newListeners
break
}
}
case owner := <-self.notifyListener:
log.Print("Shutting down SegmentCoreReaders...")
isRunning = false
for _, v := range coreClosedListeners {
v.onClose(owner)
}
}
}
log.Print("Listeners are done.")
}()
success := false
defer func() {
if !success {
log.Print("Failed to initialize SegmentCoreReaders.")
self.decRef()
}
}()
codec := si.info.Codec().(Codec)
log.Print("Obtaining CFS Directory...")
var cfsDir store.Directory // confusing name: if (cfs) its the cfsdir, otherwise its the segment's directory.
if si.info.IsCompoundFile() {
log.Print("Detected CompoundFile.")
name := util.SegmentFileName(si.info.Name, "", store.COMPOUND_FILE_EXTENSION)
self.cfsReader, err = store.NewCompoundFileDirectory(dir, name, context, false)
if err != nil {
return self, err
}
log.Printf("CompoundFileDirectory: %v", self.cfsReader)
cfsDir = self.cfsReader
} else {
cfsDir = dir
}
log.Printf("CFS Directory: %v", cfsDir)
log.Print("Reading FieldInfos...")
self.fieldInfos, err = codec.FieldInfosFormat().FieldInfosReader()(cfsDir, si.info.Name, store.IO_CONTEXT_READONCE)
if err != nil {
return self, err
}
self.termsIndexDivisor = termsIndexDivisor
format := codec.PostingsFormat()
log.Print("Obtaining SegmentReadState...")
segmentReadState := newSegmentReadState(cfsDir, si.info, self.fieldInfos, context, termsIndexDivisor)
// Ask codec for its Fields
log.Print("Obtaining FieldsProducer...")
self.fields, err = format.FieldsProducer(segmentReadState)
if err != nil {
return self, err
}
assert(self.fields != nil)
// ask codec for its Norms:
// TODO: since we don't write any norms file if there are no norms,
// kinda jaky to assume the codec handles the case of no norms file at all gracefully?!
if self.fieldInfos.HasDocValues {
//.........這裏部分代碼省略.........
示例12: newCompressingStoredFieldsReader
// Sole constructor.
// newCompressingStoredFieldsReader loads the stored-fields index file (.fdx)
// into memory, then opens the stored-fields data file (.fdt) and reads its
// metadata (packed-ints version), verifying both files' codec headers. On
// any failure the reader and the still-open index stream are closed with
// errors suppressed.
//
// Fixes over the previous version: the errors returned by both
// codec.CheckHeader calls were silently discarded, and the failure-path
// cleanup called util.Close with its error ignored instead of the
// suppressing variant used by every other constructor in this codebase.
func newCompressingStoredFieldsReader(d store.Directory,
	si *model.SegmentInfo, segmentSuffix string,
	fn model.FieldInfos, ctx store.IOContext, formatName string,
	compressionMode compressing.CompressionMode) (r *CompressingStoredFieldsReader, err error) {
	r = &CompressingStoredFieldsReader{}
	r.compressionMode = compressionMode
	segment := si.Name
	r.fieldInfos = fn
	r.numDocs = si.DocCount()

	var indexStream store.IndexInput
	success := false
	defer func() {
		if !success {
			log.Println("Failed to initialize CompressionStoredFieldsReader.")
			if err != nil {
				log.Print(err)
			}
			// Suppress close errors: the original failure is what matters.
			util.CloseWhileSuppressingError(r, indexStream)
		}
	}()

	// Load the index into memory
	indexStreamFN := util.SegmentFileName(segment, segmentSuffix, lucene40.FIELDS_INDEX_EXTENSION)
	indexStream, err = d.OpenInput(indexStreamFN, ctx)
	if err != nil {
		return nil, err
	}
	codecNameIdx := formatName + CODEC_SFX_IDX
	// BUGFIX: this header-check error was previously discarded.
	if _, err = codec.CheckHeader(indexStream, codecNameIdx, CODEC_SFX_VERSION_START, CODEC_SFX_VERSION_CURRENT); err != nil {
		return nil, err
	}
	if int64(codec.HeaderLength(codecNameIdx)) != indexStream.FilePointer() {
		panic("assert fail")
	}
	r.indexReader, err = newCompressingStoredFieldsIndexReader(indexStream, si)
	if err != nil {
		return nil, err
	}
	if err = indexStream.Close(); err != nil {
		return nil, err
	}
	indexStream = nil // fully consumed; keep the deferred cleanup from re-closing it

	// Open the data file and read metadata
	fieldsStreamFN := util.SegmentFileName(segment, segmentSuffix, lucene40.FIELDS_EXTENSION)
	r.fieldsStream, err = d.OpenInput(fieldsStreamFN, ctx)
	if err != nil {
		return nil, err
	}
	codecNameDat := formatName + CODEC_SFX_DAT
	// BUGFIX: this header-check error was previously discarded.
	if _, err = codec.CheckHeader(r.fieldsStream, codecNameDat, CODEC_SFX_VERSION_START, CODEC_SFX_VERSION_CURRENT); err != nil {
		return nil, err
	}
	if int64(codec.HeaderLength(codecNameDat)) != r.fieldsStream.FilePointer() {
		panic("assert fail")
	}
	n, err := r.fieldsStream.ReadVInt()
	if err != nil {
		return nil, err
	}
	r.packedIntsVersion = int(n)
	r.decompressor = compressionMode.NewDecompressor()
	r.bytes = make([]byte, 0)
	success = true
	return r, nil
}
示例13: NewBlockTreeTermsReader
func NewBlockTreeTermsReader(dir store.Directory,
fieldInfos FieldInfos, info *SegmentInfo,
postingsReader PostingsReaderBase, ctx store.IOContext,
segmentSuffix string, indexDivisor int) (p FieldsProducer, err error) {
// log.Print("Initializing BlockTreeTermsReader...")
fp := &BlockTreeTermsReader{
postingsReader: postingsReader,
fields: make(map[string]FieldReader),
segment: info.Name,
}
fp.in, err = dir.OpenInput(util.SegmentFileName(info.Name, segmentSuffix, TERMS_EXTENSION), ctx)
if err != nil {
return nil, err
}
success := false
var indexIn store.IndexInput
defer func() {
if !success {
fmt.Println("Failed to initialize BlockTreeTermsReader.")
if err != nil {
fmt.Println("DEBUG ", err)
}
// this.close() will close in:
util.CloseWhileSuppressingError(indexIn, fp)
}
}()
fp.version, err = fp.readHeader(fp.in)
if err != nil {
return nil, err
}
// log.Printf("Version: %v", fp.version)
if indexDivisor != -1 {
filename := util.SegmentFileName(info.Name, segmentSuffix, TERMS_INDEX_EXTENSION)
indexIn, err = dir.OpenInput(filename, ctx)
if err != nil {
return nil, err
}
indexVersion, err := fp.readIndexHeader(indexIn)
if err != nil {
return nil, err
}
// log.Printf("Index version: %v", indexVersion)
if int(indexVersion) != fp.version {
return nil, errors.New(fmt.Sprintf("mixmatched version files: %v=%v,%v=%v", fp.in, fp.version, indexIn, indexVersion))
}
}
// verify
if indexIn != nil && fp.version >= TERMS_VERSION_CURRENT {
if _, err = store.ChecksumEntireFile(indexIn); err != nil {
return nil, err
}
}
// Have PostingsReader init itself
postingsReader.Init(fp.in)
if fp.version >= TERMS_VERSION_CHECKSUM {
// NOTE: data file is too costly to verify checksum against all the
// bytes on open, but for now we at least verify proper structure
// of the checksum footer: which looks for FOOTER_MAGIC +
// algorithmID. This is cheap and can detect some forms of
// corruption such as file trucation.
if _, err = codec.RetrieveChecksum(fp.in); err != nil {
return nil, err
}
}
// Read per-field details
fp.seekDir(fp.in, fp.dirOffset)
if indexDivisor != -1 {
fp.seekDir(indexIn, fp.indexDirOffset)
}
numFields, err := fp.in.ReadVInt()
if err != nil {
return nil, err
}
// log.Printf("Fields number: %v", numFields)
if numFields < 0 {
return nil, errors.New(fmt.Sprintf("invalid numFields: %v (resource=%v)", numFields, fp.in))
}
for i := int32(0); i < numFields; i++ {
// log.Printf("Next field...")
field, err := fp.in.ReadVInt()
if err != nil {
return nil, err
}
// log.Printf("Field: %v", field)
numTerms, err := fp.in.ReadVLong()
if err != nil {
return nil, err
}
//.........這裏部分代碼省略.........
示例14: newLucene41PostingsWriter
/*
Creates a postings writer with the specified PackedInts overhead ratio.

The .doc output is always created; .pos and .pay outputs are only created
when the field infos indicate proximity and payload/offset data. On any
failure all outputs opened so far are closed with errors suppressed.

Fixes over the previous version: the error returned by writing the .pay
codec header was silently dropped (which could leave a .pay file without a
valid header while reporting success), and the misspelled internal
parameter name is corrected.
*/
func newLucene41PostingsWriter(state *SegmentWriteState,
	acceptableOverheadRatio float32) (*Lucene41PostingsWriter, error) {
	docOut, err := state.Directory.CreateOutput(
		util.SegmentFileName(state.SegmentInfo.Name,
			state.SegmentSuffix,
			LUCENE41_DOC_EXTENSION),
		state.Context)
	if err != nil {
		return nil, err
	}
	ans := new(Lucene41PostingsWriter)
	// The closure scopes the success/defer cleanup to the header-writing
	// phase: every output opened so far is released if anything fails.
	if err = func() (err error) {
		var posOut store.IndexOutput
		var payOut store.IndexOutput
		var success = false
		defer func() {
			if !success {
				util.CloseWhileSuppressingError(docOut, posOut, payOut)
			}
		}()
		if err = codec.WriteHeader(docOut, LUCENE41_DOC_CODEC, LUCENE41_VERSION_CURRENT); err != nil {
			return err
		}
		if ans.forUtil, err = NewForUtilInto(acceptableOverheadRatio, docOut); err != nil {
			return err
		}
		if state.FieldInfos.HasProx {
			ans.posDeltaBuffer = make([]int, MAX_DATA_SIZE)
			posOut, err = state.Directory.CreateOutput(util.SegmentFileName(
				state.SegmentInfo.Name, state.SegmentSuffix, LUCENE41_POS_EXTENSION),
				state.Context)
			if err != nil {
				return err
			}
			if err = codec.WriteHeader(posOut, LUCENE41_POS_CODEC, LUCENE41_VERSION_CURRENT); err != nil {
				return err
			}
			if state.FieldInfos.HasPayloads {
				ans.payloadBytes = make([]byte, 128)
				ans.payloadLengthBuffer = make([]int, MAX_DATA_SIZE)
			}
			if state.FieldInfos.HasOffsets {
				ans.offsetStartDeltaBuffer = make([]int, MAX_DATA_SIZE)
				ans.offsetLengthBuffer = make([]int, MAX_DATA_SIZE)
			}
			if state.FieldInfos.HasPayloads || state.FieldInfos.HasOffsets {
				payOut, err = state.Directory.CreateOutput(util.SegmentFileName(
					state.SegmentInfo.Name, state.SegmentSuffix, LUCENE41_PAY_EXTENSION),
					state.Context)
				if err != nil {
					return err
				}
				// BUGFIX: this error was previously dropped.
				if err = codec.WriteHeader(payOut, LUCENE41_PAY_CODEC, LUCENE41_VERSION_CURRENT); err != nil {
					return err
				}
			}
		}
		ans.payOut, ans.posOut = payOut, posOut
		ans.docOut = docOut
		success = true
		return nil
	}(); err != nil {
		return nil, err
	}
	ans.docDeltaBuffer = make([]int, MAX_DATA_SIZE)
	ans.freqBuffer = make([]int, MAX_DATA_SIZE)
	ans.encoded = make([]byte, MAX_ENCODED_SIZE)
	// TODO: should we try skipping every 2/4 blocks...?
	ans.skipWriter = NewSkipWriter(
		maxSkipLevels,
		LUCENE41_BLOCK_SIZE,
		state.SegmentInfo.DocCount(),
		ans.docOut,
		ans.posOut,
		ans.payOut)
	return ans, nil
}
示例15:
// Constants describing the Lucene 4.0 segment-info (.si) file format.
const (
	LUCENE40_SI_EXTENSION    = "si"                   // file extension of the per-segment info file
	LUCENE40_CODEC_NAME      = "Lucene40SegmentInfo"  // codec-header name written to / checked in the file
	LUCENE40_VERSION_START   = 0                      // first supported format version
	LUCENE40_VERSION_CURRENT = LUCENE40_VERSION_START // current format version (only one exists)
	SEGMENT_INFO_YES         = 1                      // flag value: segment uses a compound file
)
// lucene40/Lucene40SegmentInfoReader.java
var Lucene40SegmentInfoReader = func(dir store.Directory,
segment string, context store.IOContext) (si *model.SegmentInfo, err error) {
si = new(model.SegmentInfo)
fileName := util.SegmentFileName(segment, "", LUCENE40_SI_EXTENSION)
input, err := dir.OpenInput(fileName, context)
if err != nil {
return si, err
}
success := false
defer func() {
if !success {
util.CloseWhileSuppressingError(input)
} else {
input.Close()
}
}()
_, err = codec.CheckHeader(input, LUCENE40_CODEC_NAME, LUCENE40_VERSION_START, LUCENE40_VERSION_CURRENT)