This article collects typical usage examples of the Java attribute org.apache.lucene.document.StringField.TYPE_NOT_STORED. If you are wondering what StringField.TYPE_NOT_STORED is for and how to use it in practice, the curated examples below should help. For more background, see the enclosing class org.apache.lucene.document.StringField.
The following 11 code examples show how the StringField.TYPE_NOT_STORED attribute is used; by default they are ordered by popularity.
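Before the examples, here is a minimal, self-contained sketch of what the attribute itself represents: StringField.TYPE_NOT_STORED is a frozen FieldType describing an indexed, non-tokenized field whose value is not stored. This sketch is illustrative only (the class name TypeNotStoredSketch and the field names are made up), and it assumes the same Lucene 4.x-style API used in the examples that follow.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.StringField;

public class TypeNotStoredSketch {
  public static void main(String[] args) {
    Document doc = new Document();

    // Use the constant directly: the value is indexed as a single token but not stored.
    doc.add(new Field("id", "doc-42", StringField.TYPE_NOT_STORED));

    // Or copy it into a new FieldType to customize; the copy is mutable until freeze().
    FieldType withVectors = new FieldType(StringField.TYPE_NOT_STORED);
    withVectors.setStoreTermVectors(true);
    withVectors.freeze();
    doc.add(new Field("tag", "lucene", withVectors));
  }
}

The examples below follow the same two patterns: either the constant is used as-is, or it is copied into a new FieldType and customized (term vectors, index options, and so on).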
Example 1: update
public void update(IndexWriter writer) throws IOException {
  // Add 10 docs:
  FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
  customType.setStoreTermVectors(true);
  for(int j=0; j<10; j++) {
    Document d = new Document();
    int n = random().nextInt();
    d.add(newField("id", Integer.toString(nextID++), customType));
    d.add(newTextField("contents", English.intToEnglish(n), Field.Store.NO));
    writer.addDocument(d);
  }

  // Delete 5 docs:
  int deleteID = nextID-1;
  for(int j=0; j<5; j++) {
    writer.deleteDocuments(new Term("id", ""+deleteID));
    deleteID -= 2;
  }
}
Example 2: setFieldType
/** Used to customize the indexing options of the 4 number fields, and to a lesser degree the XDL field too. Search
 * requires indexed=true, and relevancy requires docValues. If these features aren't needed then disable them.
 * {@link FieldType#freeze()} is called on the argument. */
public void setFieldType(FieldType fieldType) {
  fieldType.freeze();
  this.fieldType = fieldType;
  //only double's supported right now
  if (fieldType.numericType() != FieldType.NumericType.DOUBLE)
    throw new IllegalArgumentException("BBoxStrategy only supports doubles at this time.");
  //for xdlFieldType, copy some similar options. Don't do docValues since it isn't needed here.
  xdlFieldType = new FieldType(StringField.TYPE_NOT_STORED);
  xdlFieldType.setStored(fieldType.stored());
  xdlFieldType.setIndexed(fieldType.indexed());
  xdlFieldType.freeze();
}
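As the javadoc notes, the argument must be an indexed FieldType with NumericType.DOUBLE. A hedged sketch of how a caller might build such a type and hand it to the strategy follows; the variable strategy is hypothetical, and the snippet assumes the Lucene 4.x API shown above.

// Start from DoubleField's non-stored type, which is already indexed with
// NumericType.DOUBLE; the FieldType copy constructor yields a mutable copy.
FieldType numberType = new FieldType(DoubleField.TYPE_NOT_STORED);
// Enabling docValues on numberType would additionally support relevancy scoring.
strategy.setFieldType(numberType); // setFieldType() freezes the argument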
Example 3: addGram
private static void addGram(String text, Document doc, int ng1, int ng2) {
  int len = text.length();
  for (int ng = ng1; ng <= ng2; ng++) {
    String key = "gram" + ng;
    String end = null;
    for (int i = 0; i < len - ng + 1; i++) {
      String gram = text.substring(i, i + ng);
      FieldType ft = new FieldType(StringField.TYPE_NOT_STORED);
      ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
      Field ngramField = new Field(key, gram, ft);
      // spellchecker does not use positional queries, but we want freqs
      // for scoring these multivalued n-gram fields.
      doc.add(ngramField);
      if (i == 0) {
        // only one term possible in the startXXField, TF/pos and norms aren't needed.
        Field startField = new StringField("start" + ng, gram, Field.Store.NO);
        doc.add(startField);
      }
      end = gram;
    }
    if (end != null) { // may not be present if len==ng1
      // only one term possible in the endXXField, TF/pos and norms aren't needed.
      Field endField = new StringField("end" + ng, end, Field.Store.NO);
      doc.add(endField);
    }
  }
}
Example 4: testTermVectorCorruption2
public void testTermVectorCorruption2() throws IOException {
  Directory dir = newDirectory();
  for(int iter=0;iter<2;iter++) {
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
        .setMaxBufferedDocs(2)
        .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
        .setMergeScheduler(new SerialMergeScheduler())
        .setMergePolicy(new LogDocMergePolicy()));

    Document document = new Document();
    FieldType customType = new FieldType();
    customType.setStored(true);

    Field storedField = newField("stored", "stored", customType);
    document.add(storedField);
    writer.addDocument(document);
    writer.addDocument(document);

    document = new Document();
    document.add(storedField);
    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
    customType2.setStoreTermVectors(true);
    customType2.setStoreTermVectorPositions(true);
    customType2.setStoreTermVectorOffsets(true);
    Field termVectorField = newField("termVector", "termVector", customType2);
    document.add(termVectorField);
    writer.addDocument(document);
    writer.forceMerge(1);
    writer.close();

    IndexReader reader = DirectoryReader.open(dir);
    assertNull(reader.getTermVectors(0));
    assertNull(reader.getTermVectors(1));
    assertNotNull(reader.getTermVectors(2));
    reader.close();
  }
  dir.close();
}
Example 5: testNoTermVectorAfterTermVector
public void testNoTermVectorAfterTermVector() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  Document document = new Document();
  FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
  customType2.setStoreTermVectors(true);
  customType2.setStoreTermVectorPositions(true);
  customType2.setStoreTermVectorOffsets(true);
  document.add(newField("tvtest", "a b c", customType2));
  iw.addDocument(document);

  document = new Document();
  document.add(newTextField("tvtest", "x y z", Field.Store.NO));
  iw.addDocument(document);
  // Make first segment
  iw.commit();

  FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
  customType.setStoreTermVectors(true);
  document = new Document();
  document.add(newField("tvtest", "a b c", customType));
  iw.addDocument(document);
  // Make 2nd segment
  iw.commit();
  iw.forceMerge(1);

  iw.close();
  dir.close();
}
Example 6: testNoTermVectorAfterTermVectorMerge
public void testNoTermVectorAfterTermVectorMerge() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  Document document = new Document();
  FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
  customType.setStoreTermVectors(true);
  document.add(newField("tvtest", "a b c", customType));
  iw.addDocument(document);
  iw.commit();

  document = new Document();
  document.add(newTextField("tvtest", "x y z", Field.Store.NO));
  iw.addDocument(document);
  // Make first segment
  iw.commit();

  iw.forceMerge(1);

  FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
  customType2.setStoreTermVectors(true);
  // Create the new document before adding the field, so the field ends up in the doc being indexed.
  document = new Document();
  document.add(newField("tvtest", "a b c", customType2));
  iw.addDocument(document);
  // Make 2nd segment
  iw.commit();
  iw.forceMerge(1);

  iw.close();
  dir.close();
}
Example 7: testPostings
/** tests terms with different probabilities of being in the document.
 *  depends heavily on term vectors cross-check at checkIndex
 */
public void testPostings() throws Exception {
  Directory dir = newFSDirectory(createTempDir("postings"));
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setCodec(Codec.forName("Lucene40"));
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);

  Document doc = new Document();

  // id field
  FieldType idType = new FieldType(StringField.TYPE_NOT_STORED);
  idType.setStoreTermVectors(true);
  Field idField = new Field("id", "", idType);
  doc.add(idField);

  // title field: short text field
  FieldType titleType = new FieldType(TextField.TYPE_NOT_STORED);
  titleType.setStoreTermVectors(true);
  titleType.setStoreTermVectorPositions(true);
  titleType.setStoreTermVectorOffsets(true);
  titleType.setIndexOptions(indexOptions());
  Field titleField = new Field("title", "", titleType);
  doc.add(titleField);

  // body field: long text field
  FieldType bodyType = new FieldType(TextField.TYPE_NOT_STORED);
  bodyType.setStoreTermVectors(true);
  bodyType.setStoreTermVectorPositions(true);
  bodyType.setStoreTermVectorOffsets(true);
  bodyType.setIndexOptions(indexOptions());
  Field bodyField = new Field("body", "", bodyType);
  doc.add(bodyField);

  int numDocs = atLeast(1000);
  for (int i = 0; i < numDocs; i++) {
    idField.setStringValue(Integer.toString(i));
    titleField.setStringValue(fieldValue(1));
    bodyField.setStringValue(fieldValue(3));
    iw.addDocument(doc);
    if (random().nextInt(20) == 0) {
      iw.deleteDocuments(new Term("id", Integer.toString(i)));
    }
  }
  if (random().nextBoolean()) {
    // delete 1-100% of docs
    iw.deleteDocuments(new Term("title", terms[random().nextInt(terms.length)]));
  }
  iw.close();
  dir.close(); // checkindex
}
Example 8: fieldType
@Override
public IndexableFieldType fieldType() {
  return StringField.TYPE_NOT_STORED;
}
Example 9: testDoubleOffsetCounting
public void testDoubleOffsetCounting() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  FieldType customType = new FieldType(StringField.TYPE_NOT_STORED);
  customType.setStoreTermVectors(true);
  customType.setStoreTermVectorPositions(true);
  customType.setStoreTermVectorOffsets(true);
  Field f = newField("field", "abcd", customType);
  doc.add(f);
  doc.add(f);
  Field f2 = newField("field", "", customType);
  doc.add(f2);
  doc.add(f);
  w.addDocument(doc);
  w.close();

  IndexReader r = DirectoryReader.open(dir);
  Terms vector = r.getTermVectors(0).terms("field");
  assertNotNull(vector);
  TermsEnum termsEnum = vector.iterator(null);
  assertNotNull(termsEnum.next());
  assertEquals("", termsEnum.term().utf8ToString());

  // Token "" occurred once
  assertEquals(1, termsEnum.totalTermFreq());

  DocsAndPositionsEnum dpEnum = termsEnum.docsAndPositions(null, null);
  assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  dpEnum.nextPosition();
  assertEquals(8, dpEnum.startOffset());
  assertEquals(8, dpEnum.endOffset());
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());

  // Token "abcd" occurred three times
  assertEquals(new BytesRef("abcd"), termsEnum.next());
  dpEnum = termsEnum.docsAndPositions(null, dpEnum);
  assertEquals(3, termsEnum.totalTermFreq());

  assertTrue(dpEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  dpEnum.nextPosition();
  assertEquals(0, dpEnum.startOffset());
  assertEquals(4, dpEnum.endOffset());

  dpEnum.nextPosition();
  assertEquals(4, dpEnum.startOffset());
  assertEquals(8, dpEnum.endOffset());

  dpEnum.nextPosition();
  assertEquals(8, dpEnum.startOffset());
  assertEquals(12, dpEnum.endOffset());

  assertEquals(DocIdSetIterator.NO_MORE_DOCS, dpEnum.nextDoc());
  assertNull(termsEnum.next());

  r.close();
  dir.close();
}
Example 10: testTermVectorCorruption
public void testTermVectorCorruption() throws IOException {
  Directory dir = newDirectory();
  for(int iter=0;iter<2;iter++) {
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
        .setMaxBufferedDocs(2)
        .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
        .setMergeScheduler(new SerialMergeScheduler())
        .setMergePolicy(new LogDocMergePolicy()));

    Document document = new Document();
    FieldType customType = new FieldType();
    customType.setStored(true);

    Field storedField = newField("stored", "stored", customType);
    document.add(storedField);
    writer.addDocument(document);
    writer.addDocument(document);

    document = new Document();
    document.add(storedField);
    FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
    customType2.setStoreTermVectors(true);
    customType2.setStoreTermVectorPositions(true);
    customType2.setStoreTermVectorOffsets(true);
    Field termVectorField = newField("termVector", "termVector", customType2);
    document.add(termVectorField);
    writer.addDocument(document);
    writer.forceMerge(1);
    writer.close();

    IndexReader reader = DirectoryReader.open(dir);
    for(int i=0;i<reader.numDocs();i++) {
      reader.document(i);
      reader.getTermVectors(i);
    }
    reader.close();

    writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
        .setMaxBufferedDocs(2)
        .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
        .setMergeScheduler(new SerialMergeScheduler())
        .setMergePolicy(new LogDocMergePolicy()));

    Directory[] indexDirs = {new MockDirectoryWrapper(random(), new RAMDirectory(dir, newIOContext(random())))};
    writer.addIndexes(indexDirs);
    writer.forceMerge(1);
    writer.close();
  }
  dir.close();
}
Example 11: testTermVectorCorruption3
public void testTermVectorCorruption3() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2)
      .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergeScheduler(new SerialMergeScheduler())
      .setMergePolicy(new LogDocMergePolicy()));

  Document document = new Document();
  FieldType customType = new FieldType();
  customType.setStored(true);

  Field storedField = newField("stored", "stored", customType);
  document.add(storedField);

  FieldType customType2 = new FieldType(StringField.TYPE_NOT_STORED);
  customType2.setStoreTermVectors(true);
  customType2.setStoreTermVectorPositions(true);
  customType2.setStoreTermVectorOffsets(true);
  Field termVectorField = newField("termVector", "termVector", customType2);
  document.add(termVectorField);

  for(int i=0;i<10;i++)
    writer.addDocument(document);
  writer.close();

  writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2)
      .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH)
      .setMergeScheduler(new SerialMergeScheduler())
      .setMergePolicy(new LogDocMergePolicy()));
  for(int i=0;i<6;i++)
    writer.addDocument(document);

  writer.forceMerge(1);
  writer.close();

  IndexReader reader = DirectoryReader.open(dir);
  for(int i=0;i<10;i++) {
    reader.getTermVectors(i);
    reader.document(i);
  }
  reader.close();
  dir.close();
}