This article collects typical usage examples of the Java method org.apache.lucene.codecs.Codec.getDefault. If you are wondering how Codec.getDefault is used in practice, what it is for, or what calling it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.lucene.codecs.Codec.
The following shows 9 code examples of the Codec.getDefault method, sorted by popularity by default.
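Before the examples, a quick orientation: Codec.getDefault() returns the codec Lucene uses for newly written segments when none is set explicitly, and Codec.setDefault(...) replaces it process-wide. Below is a minimal sketch of the save/override/restore pattern that most of the examples revolve around; it is not taken from the examples themselves, and the choice of SimpleTextCodec (from the lucene-codecs module) is only for illustration.
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.simpletext.SimpleTextCodec;

public class DefaultCodecSketch {
  public static void main(String[] args) {
    Codec saved = Codec.getDefault();        // the codec currently used for new segments
    System.out.println("default codec: " + saved.getName());
    Codec.setDefault(new SimpleTextCodec()); // override the default process-wide
    try {
      // ... index documents or run a test against the overridden codec ...
    } finally {
      Codec.setDefault(saved);               // always restore the original default
    }
  }
}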
Example 1: write
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
private void write(final FieldInfos fieldInfos, final Directory dir, final FieldData[] fields, boolean allowPreFlex) throws Throwable {
  final int termIndexInterval = TestUtil.nextInt(random(), 13, 27);
  final Codec codec = Codec.getDefault();
  final SegmentInfo si = new SegmentInfo(dir, Version.LATEST, SEGMENT, 10000, false, codec, null);
  final SegmentWriteState state = new SegmentWriteState(InfoStream.getDefault(), dir, si, fieldInfos, termIndexInterval, null, newIOContext(random()));
  final FieldsConsumer consumer = codec.postingsFormat().fieldsConsumer(state);
  Arrays.sort(fields);
  for (final FieldData field : fields) {
    if (!allowPreFlex && codec instanceof Lucene3xCodec) {
      // code below expects unicode sort order
      continue;
    }
    field.write(consumer);
  }
  consumer.close();
}
Example 2: LiveIndexWriterConfig
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
LiveIndexWriterConfig(Analyzer analyzer, Version matchVersion) {
  this.analyzer = analyzer;
  this.matchVersion = matchVersion;
  ramBufferSizeMB = IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB;
  maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
  maxBufferedDeleteTerms = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
  readerTermsIndexDivisor = IndexWriterConfig.DEFAULT_READER_TERMS_INDEX_DIVISOR;
  mergedSegmentWarmer = null;
  termIndexInterval = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL; // TODO: this should be private to the codec, not settable here
  delPolicy = new KeepOnlyLastCommitDeletionPolicy();
  commit = null;
  useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
  openMode = OpenMode.CREATE_OR_APPEND;
  similarity = IndexSearcher.getDefaultSimilarity();
  mergeScheduler = new ConcurrentMergeScheduler();
  writeLockTimeout = IndexWriterConfig.WRITE_LOCK_TIMEOUT;
  indexingChain = DocumentsWriterPerThread.defaultIndexingChain;
  codec = Codec.getDefault();
  if (codec == null) {
    throw new NullPointerException();
  }
  infoStream = InfoStream.getDefault();
  mergePolicy = new TieredMergePolicy();
  flushPolicy = new FlushByRamOrCountsPolicy();
  readerPooling = IndexWriterConfig.DEFAULT_READER_POOLING;
  indexerThreadPool = new DocumentsWriterPerThreadPool(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES);
  perThreadHardLimitMB = IndexWriterConfig.DEFAULT_RAM_PER_THREAD_HARD_LIMIT_MB;
}
Example 3: beforeClass
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
@BeforeClass
public static void beforeClass() throws Exception {
  savedCodec = Codec.getDefault();
  // currently only these codecs support random access ordinals
  int victim = random().nextInt(2);
  switch(victim) {
    case 0:
      Codec.setDefault(TestUtil.alwaysDocValuesFormat(new DirectDocValuesFormat()));
      break; // without this break, case 0 would fall through and be overridden by the default branch
    default:
      Codec.setDefault(TestUtil.alwaysDocValuesFormat(new Lucene410DocValuesFormat()));
  }
}
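Example 3 saves the current default in savedCodec before overriding it; in the full test class this is normally paired with an @AfterClass hook that puts the saved codec back. A minimal sketch of that counterpart, assuming the same savedCodec field as above:
@AfterClass
public static void afterClass() throws Exception {
  Codec.setDefault(savedCodec); // restore the codec saved in beforeClass()
}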
Example 4: storeTermVectorPayloads
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
@Override
public boolean storeTermVectorPayloads() {
  if (Codec.getDefault() instanceof Lucene3xCodec) {
    return false; // 3.x doesn't support term vector payloads
  } else {
    return storeTermVectors() && counter % 10 != 9;
  }
}
Example 5: setUp
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
public void setUp() throws Exception {
  super.setUp();
  // set the default codec, so adding test cases to this isn't fragile
  savedCodec = Codec.getDefault();
  Codec.setDefault(getCodec());
}
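The matching tearDown is not shown in this snippet; a minimal sketch of the usual counterpart, again assuming the savedCodec field from the example:
@Override
public void tearDown() throws Exception {
  Codec.setDefault(savedCodec); // undo the override from setUp()
  super.tearDown();
}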
Example 6: testRamBytesUsed
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
/** Test the accuracy of the ramBytesUsed estimations. */
public void testRamBytesUsed() throws IOException {
  if (Codec.getDefault() instanceof RandomCodec) {
    // this test relies on the fact that two segments will be written with
    // the same codec so we need to disable MockRandomPF
    final Set<String> avoidCodecs = new HashSet<>(((RandomCodec) Codec.getDefault()).avoidCodecs);
    avoidCodecs.add(new MockRandomPostingsFormat().getName());
    Codec.setDefault(new RandomCodec(random(), avoidCodecs));
  }
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter w = new IndexWriter(dir, cfg);
  // we need to index enough documents so that constant overhead doesn't dominate
  final int numDocs = atLeast(10000);
  AtomicReader reader1 = null;
  for (int i = 0; i < numDocs; ++i) {
    Document d = new Document();
    addRandomFields(d);
    w.addDocument(d);
    if (i == 100) {
      w.forceMerge(1);
      w.commit();
      reader1 = getOnlySegmentReader(DirectoryReader.open(dir));
    }
  }
  w.forceMerge(1);
  w.commit();
  w.close();
  AtomicReader reader2 = getOnlySegmentReader(DirectoryReader.open(dir));
  for (AtomicReader reader : Arrays.asList(reader1, reader2)) {
    new SimpleMergedSegmentWarmer(InfoStream.NO_OUTPUT).warm(reader);
  }
  final long actualBytes = RamUsageTester.sizeOf(reader2, new Accumulator(reader2)) - RamUsageTester.sizeOf(reader1, new Accumulator(reader1));
  final long expectedBytes = ((SegmentReader) reader2).ramBytesUsed() - ((SegmentReader) reader1).ramBytesUsed();
  final long absoluteError = actualBytes - expectedBytes;
  final double relativeError = (double) absoluteError / actualBytes;
  final String message = "Actual RAM usage " + actualBytes + ", but got " + expectedBytes + ", " + 100*relativeError + "% error";
  assertTrue(message, Math.abs(relativeError) < 0.20d || Math.abs(absoluteError) < 1000);
  reader1.close();
  reader2.close();
  dir.close();
}
Example 7: getCodec
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
@Override
protected Codec getCodec() {
  return Codec.getDefault();
}
Example 8: testFixedPostings
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
public void testFixedPostings() throws Throwable {
  final int NUM_TERMS = 100;
  final TermData[] terms = new TermData[NUM_TERMS];
  for(int i=0;i<NUM_TERMS;i++) {
    final int[] docs = new int[] {i};
    final String text = Integer.toString(i, Character.MAX_RADIX);
    terms[i] = new TermData(text, docs, null);
  }
  final FieldInfos.Builder builder = new FieldInfos.Builder();
  final FieldData field = new FieldData("field", builder, terms, true, false);
  final FieldData[] fields = new FieldData[] {field};
  final FieldInfos fieldInfos = builder.finish();
  final Directory dir = newDirectory();
  this.write(fieldInfos, dir, fields, true);
  Codec codec = Codec.getDefault();
  final SegmentInfo si = new SegmentInfo(dir, Version.LATEST, SEGMENT, 10000, false, codec, null);
  final FieldsProducer reader = codec.postingsFormat().fieldsProducer(new SegmentReadState(dir, si, fieldInfos, newIOContext(random()), DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR));
  final Iterator<String> fieldsEnum = reader.iterator();
  String fieldName = fieldsEnum.next();
  assertNotNull(fieldName);
  final Terms terms2 = reader.terms(fieldName);
  assertNotNull(terms2);
  final TermsEnum termsEnum = terms2.iterator(null);
  DocsEnum docsEnum = null;
  for(int i=0;i<NUM_TERMS;i++) {
    final BytesRef term = termsEnum.next();
    assertNotNull(term);
    assertEquals(terms[i].text2, term.utf8ToString());
    // do this twice to stress test the codec's reuse, ie,
    // make sure it properly fully resets (rewinds) its
    // internal state:
    for(int iter=0;iter<2;iter++) {
      docsEnum = TestUtil.docs(random(), termsEnum, null, docsEnum, DocsEnum.FLAG_NONE);
      assertEquals(terms[i].docs[0], docsEnum.nextDoc());
      assertEquals(DocIdSetIterator.NO_MORE_DOCS, docsEnum.nextDoc());
    }
  }
  assertNull(termsEnum.next());
  for(int i=0;i<NUM_TERMS;i++) {
    assertEquals(termsEnum.seekCeil(new BytesRef(terms[i].text2)), TermsEnum.SeekStatus.FOUND);
  }
  assertFalse(fieldsEnum.hasNext());
  reader.close();
  dir.close();
}
Example 9: testMerge
import org.apache.lucene.codecs.Codec; // import the package/class the method depends on
public void testMerge() throws IOException {
  final Codec codec = Codec.getDefault();
  final SegmentInfo si = new SegmentInfo(mergedDir, Version.LATEST, mergedSegment, -1, false, codec, null);
  SegmentMerger merger = new SegmentMerger(Arrays.<AtomicReader>asList(reader1, reader2),
      si, InfoStream.getDefault(), mergedDir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL,
      MergeState.CheckAbort.NONE, new FieldInfos.FieldNumbers(), newIOContext(random()), true);
  MergeState mergeState = merger.merge();
  int docsMerged = mergeState.segmentInfo.getDocCount();
  assertTrue(docsMerged == 2);
  //Should be able to open a new SegmentReader against the new directory
  SegmentReader mergedReader = new SegmentReader(new SegmentCommitInfo(
      new SegmentInfo(mergedDir, Version.LATEST, mergedSegment, docsMerged,
          false, codec, null),
      0, -1L, -1L, -1L),
      DirectoryReader.DEFAULT_TERMS_INDEX_DIVISOR, newIOContext(random()));
  assertTrue(mergedReader != null);
  assertTrue(mergedReader.numDocs() == 2);
  Document newDoc1 = mergedReader.document(0);
  assertTrue(newDoc1 != null);
  //There are 2 unstored fields on the document
  assertTrue(DocHelper.numFields(newDoc1) == DocHelper.numFields(doc1) - DocHelper.unstored.size());
  Document newDoc2 = mergedReader.document(1);
  assertTrue(newDoc2 != null);
  assertTrue(DocHelper.numFields(newDoc2) == DocHelper.numFields(doc2) - DocHelper.unstored.size());
  DocsEnum termDocs = TestUtil.docs(random(), mergedReader,
      DocHelper.TEXT_FIELD_2_KEY,
      new BytesRef("field"),
      MultiFields.getLiveDocs(mergedReader),
      null,
      0);
  assertTrue(termDocs != null);
  assertTrue(termDocs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  int tvCount = 0;
  for(FieldInfo fieldInfo : mergedReader.getFieldInfos()) {
    if (fieldInfo.hasVectors()) {
      tvCount++;
    }
  }
  //System.out.println("stored size: " + stored.size());
  assertEquals("We do not have 3 fields that were indexed with term vector", 3, tvCount);
  Terms vector = mergedReader.getTermVectors(0).terms(DocHelper.TEXT_FIELD_2_KEY);
  assertNotNull(vector);
  assertEquals(3, vector.size());
  TermsEnum termsEnum = vector.iterator(null);
  int i = 0;
  while (termsEnum.next() != null) {
    String term = termsEnum.term().utf8ToString();
    int freq = (int) termsEnum.totalTermFreq();
    //System.out.println("Term: " + term + " Freq: " + freq);
    assertTrue(DocHelper.FIELD_2_TEXT.indexOf(term) != -1);
    assertTrue(DocHelper.FIELD_2_FREQS[i] == freq);
    i++;
  }
  TestSegmentReader.checkNorms(mergedReader);
  mergedReader.close();
}