This article collects typical usages of the Java constant org.apache.lucene.document.TextField.TYPE_STORED. If you are wondering what TextField.TYPE_STORED is, how to use it, or want to see concrete examples, the selected code samples below should help. You can also read further about the enclosing class, org.apache.lucene.document.TextField.
The following presents 15 code examples of TextField.TYPE_STORED, listed by popularity.
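Before the numbered examples, here is a quick orientation: TextField.TYPE_STORED is a frozen FieldType describing text that is analyzed (tokenized), indexed, and also stored, so the original string can be read back from a search hit; its sibling TextField.TYPE_NOT_STORED indexes the text without storing it (see Example 3). The following minimal sketch is not one of the 15 examples; it assumes a Lucene 5.x/6.x-era API consistent with the snippets below (RAMDirectory, no-argument StandardAnalyzer, IndexSearcher.doc), and the class name TypeStoredSketch and field name "title" are illustrative only.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class TypeStoredSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory(); // in-memory index, enough for a demo
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        Document doc = new Document();
        // TYPE_STORED = indexed + tokenized + stored, so the raw text survives in the index
        doc.add(new Field("title", "the quick brown fox", TextField.TYPE_STORED));
        writer.addDocument(doc);
        writer.close();

        DirectoryReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs hits = searcher.search(new TermQuery(new Term("title", "fox")), 1);
        // because the field type is stored, the original value can be retrieved from the hit
        System.out.println(searcher.doc(hits.scoreDocs[0].doc).get("title"));
        reader.close();
        dir.close();
    }
}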
Example 1: indexOneDoc
private IndexReader indexOneDoc(Directory dir, String field, String value, Analyzer analyzer) throws IOException {
    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
    iwc.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
    FieldType ft = new FieldType(TextField.TYPE_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    Field textField = new Field(field, "", ft);
    Document doc = new Document();
    doc.add(textField);
    textField.setStringValue(value);
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    iw.close();
    return ir;
}
Example 2: testVectorHighlighter
public void testVectorHighlighter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
    vectorsType.setStoreTermVectors(true);
    vectorsType.setStoreTermVectorPositions(true);
    vectorsType.setStoreTermVectorOffsets(true);
    document.add(new Field("content", "the big bad dog", vectorsType));
    indexWriter.addDocument(document);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    assertThat(topDocs.totalHits, equalTo(1));
    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, notNullValue());
    assertThat(fragment, equalTo("the big <b>bad</b> dog"));
}
Example 3: getDocument
private Document getDocument(File file) throws IOException {
    Document document = new Document();
    // index file contents
    Field contentField = new Field(LuceneConstants.CONTENTS, new FileReader(file), TextField.TYPE_NOT_STORED);
    // index file name
    Field fileNameField = new Field(LuceneConstants.FILE_NAME, file.getName(), TextField.TYPE_STORED);
    // index file path
    Field filePathField = new Field(LuceneConstants.FILE_PATH, file.getCanonicalPath(), TextField.TYPE_STORED);
    document.add(contentField);
    document.add(fileNameField);
    document.add(filePathField);
    return document;
}
Example 4: testEmptyDocAfterFlushingRealDoc
public void testEmptyDocAfterFlushingRealDoc() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
    Document doc = new Document();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.setStoreTermVectors(true);
    customType.setStoreTermVectorPositions(true);
    customType.setStoreTermVectorOffsets(true);
    doc.add(newField("field", "aaa", customType));
    writer.addDocument(doc);
    writer.commit();
    if (VERBOSE) {
        System.out.println("\nTEST: now add empty doc");
    }
    writer.addDocument(new Document());
    writer.close();
    IndexReader reader = DirectoryReader.open(dir);
    assertEquals(2, reader.numDocs());
    reader.close();
    dir.close();
}
Example 5: writeEmptyTermVector
private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields fields = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(fields, null, flags, fields);
    outResponse.setExists(true);
    dr.close();
    dir.close();
}
Example 6: writeStandardTermVector
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("title", "the1 quick brown fox jumps over the1 lazy dog", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog", type));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    dr.close();
    dir.close();
}
Example 7: getDocument
private Document getDocument(File file) throws IOException {
    Document document = new Document();
    Field contentsField = new TextField(LuceneConstants.CONTENTS, new String(Files.readAllBytes(file.toPath())), Store.YES);
    Field fileNameField = new Field(LuceneConstants.FILE_NAME, file.getName(), TextField.TYPE_STORED);
    Field filePathField = new Field(LuceneConstants.FILE_PATH, file.getCanonicalPath(), TextField.TYPE_STORED);
    document.add(contentsField);
    document.add(fileNameField);
    document.add(filePathField);
    return document;
}
Example 8: writeStandardTermVector
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = LuceneTestCase.newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("plaintext", "the1 quick brown fox jumps over the1 lazy dog comment", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over the1 lazy dog comment", type));
    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    dr.close();
    dir.close();
}
Example 9: testBuddhism
public void testBuddhism() throws Exception {
    String text = "This eight-volume set brings together seminal papers in Buddhist studies from a vast " +
            "range of academic disciplines published over the last forty years. With a new introduction " +
            "by the editor, this collection is a unique and unrivalled research resource for both " +
            "student and scholar. Coverage includes: - Buddhist origins; early history of Buddhism in " +
            "South and Southeast Asia - early Buddhist Schools and Doctrinal History; Theravada Doctrine " +
            "- the Origins and nature of Mahayana Buddhism; some Mahayana religious topics - Abhidharma " +
            "and Madhyamaka - Yogacara, the Epistemological tradition, and Tathagatagarbha - Tantric " +
            "Buddhism (Including China and Japan); Buddhism in Nepal and Tibet - Buddhism in South and " +
            "Southeast Asia, and - Buddhism in China, East Asia, and Japan.";
    Directory dir = newDirectory();
    Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
    FieldType positionsType = new FieldType(TextField.TYPE_STORED);
    positionsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    Field body = new Field("body", text, positionsType);
    Document document = new Document();
    document.add(body);
    iw.addDocument(document);
    IndexReader ir = iw.getReader();
    iw.close();
    IndexSearcher searcher = newSearcher(ir);
    PhraseQuery query = new PhraseQuery();
    query.add(new Term("body", "buddhist"));
    query.add(new Term("body", "origins"));
    TopDocs topDocs = searcher.search(query, 10);
    assertEquals(1, topDocs.totalHits);
    PostingsHighlighter highlighter = new PostingsHighlighter();
    String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 2);
    assertEquals(1, snippets.length);
    assertTrue(snippets[0].contains("<b>Buddhist</b> <b>origins</b>"));
    ir.close();
    dir.close();
}
Example 10: testNoSegmentFile
public void testNoSegmentFile() throws IOException {
    BaseDirectoryWrapper dir = newDirectory();
    dir.setLockFactory(NoLockFactory.getNoLockFactory());
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
            .setMaxBufferedDocs(2));
    Document doc = new Document();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.setStoreTermVectors(true);
    customType.setStoreTermVectorPositions(true);
    customType.setStoreTermVectorOffsets(true);
    doc.add(newField("c", "val", customType));
    w.addDocument(doc);
    w.addDocument(doc);
    IndexWriter w2 = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))
            .setMaxBufferedDocs(2)
            .setOpenMode(OpenMode.CREATE));
    w2.close();
    // If we don't do that, the test fails on Windows
    w.rollback();
    // This test leaves only segments.gen, which causes
    // DirectoryReader.indexExists to return true:
    dir.setCheckIndexOnClose(false);
    dir.close();
}
Example 11: setUp
@Override
@Before
public void setUp() throws Exception {
    super.setUp();
    dir = newDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(random(), dir);
    FieldType ft = new FieldType(TextField.TYPE_STORED);
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorOffsets(true);
    ft.setStoreTermVectorPositions(true);
    Analyzer analyzer = new MockAnalyzer(random());
    Document doc;
    for (int i = 0; i < 10; i++) {
        doc = new Document();
        doc.add(new Field("id", Integer.toString(i), ft));
        doc.add(new Field("text", random().nextInt(10) + " " + random().nextInt(10) + " " + random().nextInt(10), ft));
        indexWriter.addDocument(doc, analyzer);
    }
    indexWriter.commit();
    index = indexWriter.getReader();
    indexWriter.close();
}
Example 12: testEmptyHighlights
/** Make sure highlighter returns first N sentences if
 *  there were no hits. */
public void testEmptyHighlights() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
    iwc.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    Document doc = new Document();
    Field body = new Field("body", "test this is. another sentence this test has. far away is that planet.", offsetsType);
    doc.add(body);
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    iw.close();
    IndexSearcher searcher = newSearcher(ir);
    PostingsHighlighter highlighter = new PostingsHighlighter();
    Query query = new TermQuery(new Term("body", "highlighting"));
    int[] docIDs = new int[] {0};
    String snippets[] = highlighter.highlightFields(new String[] {"body"}, query, searcher, docIDs, new int[] { 2 }).get("body");
    assertEquals(1, snippets.length);
    assertEquals("test this is. another sentence this test has. ", snippets[0]);
    ir.close();
    dir.close();
}
Example 13: addNoProxDoc
private void addNoProxDoc(IndexWriter writer) throws IOException {
    Document doc = new Document();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.setIndexOptions(IndexOptions.DOCS_ONLY);
    Field f = new Field("content3", "aaa", customType);
    doc.add(f);
    FieldType customType2 = new FieldType();
    customType2.setStored(true);
    customType2.setIndexOptions(IndexOptions.DOCS_ONLY);
    f = new Field("content4", "aaa", customType2);
    doc.add(f);
    writer.addDocument(doc);
}
Example 14: testCustomB
/** sets b=0 to disable passage length normalization */
public void testCustomB() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.SIMPLE, true));
    iwc.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
    FieldType offsetsType = new FieldType(TextField.TYPE_STORED);
    offsetsType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    Field body = new Field("body", "", offsetsType);
    Document doc = new Document();
    doc.add(body);
    body.setStringValue("This is a test. This test is a better test but the sentence is excruiatingly long, " +
            "you have no idea how painful it was for me to type this long sentence into my IDE.");
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    iw.close();
    IndexSearcher searcher = newSearcher(ir);
    PostingsHighlighter highlighter = new PostingsHighlighter(10000) {
        @Override
        protected PassageScorer getScorer(String field) {
            return new PassageScorer(1.2f, 0, 87);
        }
    };
    Query query = new TermQuery(new Term("body", "test"));
    TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER);
    assertEquals(1, topDocs.totalHits);
    String snippets[] = highlighter.highlight("body", query, searcher, topDocs, 1);
    assertEquals(1, snippets.length);
    assertTrue(snippets[0].startsWith("This <b>test</b> is a better <b>test</b>"));
    ir.close();
    dir.close();
}
Example 15: testTermVectors
public void testTermVectors() throws Exception {
    Directory d = newDirectory();
    // set up writer
    IndexWriter writer = new IndexWriter(
            d,
            newIndexWriterConfig(new MockAnalyzer(random()))
                    .setMergePolicy(newLogMergePolicy())
    );
    // want to get some more segments here
    // new termvector fields
    int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
    FieldType customType5 = new FieldType(TextField.TYPE_STORED);
    customType5.setStoreTermVectors(true);
    FieldType customType6 = new FieldType(TextField.TYPE_STORED);
    customType6.setStoreTermVectors(true);
    customType6.setStoreTermVectorOffsets(true);
    FieldType customType7 = new FieldType(TextField.TYPE_STORED);
    customType7.setStoreTermVectors(true);
    customType7.setStoreTermVectorPositions(true);
    FieldType customType8 = new FieldType(TextField.TYPE_STORED);
    customType8.setStoreTermVectors(true);
    customType8.setStoreTermVectorOffsets(true);
    customType8.setStoreTermVectorPositions(true);
    for (int i = 0; i < 5 * mergeFactor; i++) {
        Document doc = new Document();
        doc.add(new TextField("tvnot", "one two two three three three", Field.Store.YES));
        doc.add(new Field("termvector", "one two two three three three", customType5));
        doc.add(new Field("tvoffset", "one two two three three three", customType6));
        doc.add(new Field("tvposition", "one two two three three three", customType7));
        doc.add(new Field("tvpositionoffset", "one two two three three three", customType8));
        writer.addDocument(doc);
    }
    writer.close();
    d.close();
}