This article collects typical usage examples of the Java class org.apache.lucene.document.FieldType. If you are unsure what FieldType is for or how to use it, the curated examples below should help.
The FieldType class belongs to the org.apache.lucene.document package. 15 code examples are shown below, ordered by popularity.
Example 1: termVectorOptionsToString
import org.apache.lucene.document.FieldType; // import the required package/class
public static String termVectorOptionsToString(FieldType fieldType) {
    if (!fieldType.storeTermVectors()) {
        return "no";
    } else if (!fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
        return "yes";
    } else if (fieldType.storeTermVectorOffsets() && !fieldType.storeTermVectorPositions()) {
        return "with_offsets";
    } else {
        StringBuilder builder = new StringBuilder("with");
        if (fieldType.storeTermVectorPositions()) {
            builder.append("_positions");
        }
        if (fieldType.storeTermVectorOffsets()) {
            builder.append("_offsets");
        }
        if (fieldType.storeTermVectorPayloads()) {
            builder.append("_payloads");
        }
        return builder.toString();
    }
}
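As a quick sanity check, here is a hedged sketch (the method name printTermVectorOptions is illustrative, and it assumes the helper above is in scope) of how the term-vector flag combinations map onto the returned strings:

// Hypothetical check, not part of the original example.
public static void printTermVectorOptions() {
    FieldType withAll = new FieldType(TextField.TYPE_NOT_STORED);
    withAll.setStoreTermVectors(true);
    withAll.setStoreTermVectorPositions(true);
    withAll.setStoreTermVectorOffsets(true);
    withAll.setStoreTermVectorPayloads(true);
    System.out.println(termVectorOptionsToString(withAll)); // "with_positions_offsets_payloads"

    FieldType offsetsOnly = new FieldType(TextField.TYPE_NOT_STORED);
    offsetsOnly.setStoreTermVectors(true);
    offsetsOnly.setStoreTermVectorOffsets(true);
    System.out.println(termVectorOptionsToString(offsetsOnly)); // "with_offsets"
}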
Example 2: indexOneDoc
import org.apache.lucene.document.FieldType; // import the required package/class
private IndexReader indexOneDoc(Directory dir, String field, String value, Analyzer analyzer) throws IOException {
    IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
    iwc.setMergePolicy(newLogMergePolicy());
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
    FieldType ft = new FieldType(TextField.TYPE_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    Field textField = new Field(field, "", ft);
    Document doc = new Document();
    doc.add(textField);
    textField.setStringValue(value);
    iw.addDocument(doc);
    IndexReader ir = iw.getReader();
    iw.close();
    return ir;
}
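The field above is indexed with DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, which is exactly what offset-based highlighters need. A hedged sketch of one way the returned reader could be used (highlightOneDoc is an illustrative name, and UnifiedHighlighter assumes Lucene 6.x or later):

// Hypothetical usage, not part of the original example; UnifiedHighlighter reads the
// offsets indexed in the postings above, so no term vectors or re-analysis are needed.
private void highlightOneDoc(Directory dir) throws IOException {
    Analyzer analyzer = new StandardAnalyzer();
    IndexReader reader = indexOneDoc(dir, "body", "the quick brown fox", analyzer);
    IndexSearcher searcher = new IndexSearcher(reader);
    Query query = new TermQuery(new Term("body", "fox"));
    TopDocs topDocs = searcher.search(query, 1);
    UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, analyzer);
    String[] fragments = highlighter.highlight("body", query, topDocs);
    // fragments[0] is expected to contain "the quick brown <b>fox</b>"
    reader.close();
}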
Example 3: testVectorHighlighter
import org.apache.lucene.document.FieldType; // import the required package/class
public void testVectorHighlighter() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    FieldType vectorsType = new FieldType(TextField.TYPE_STORED);
    vectorsType.setStoreTermVectors(true);
    vectorsType.setStoreTermVectorPositions(true);
    vectorsType.setStoreTermVectorOffsets(true);
    document.add(new Field("content", "the big bad dog", vectorsType));
    indexWriter.addDocument(document);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    assertThat(topDocs.totalHits, equalTo(1));
    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, notNullValue());
    assertThat(fragment, equalTo("the big <b>bad</b> dog"));
}
Example 4: testVectorHighlighterNoStore
import org.apache.lucene.document.FieldType; // import the required package/class
public void testVectorHighlighterNoStore() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document document = new Document();
    document.add(new TextField("_id", "1", Field.Store.YES));
    FieldType vectorsType = new FieldType(TextField.TYPE_NOT_STORED);
    vectorsType.setStoreTermVectors(true);
    vectorsType.setStoreTermVectorPositions(true);
    vectorsType.setStoreTermVectorOffsets(true);
    document.add(new Field("content", "the big bad dog", vectorsType));
    indexWriter.addDocument(document);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);
    assertThat(topDocs.totalHits, equalTo(1));
    FastVectorHighlighter highlighter = new FastVectorHighlighter();
    String fragment = highlighter.getBestFragment(highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
            reader, topDocs.scoreDocs[0].doc, "content", 30);
    assertThat(fragment, nullValue());
}
Example 5: testNoTokens
import org.apache.lucene.document.FieldType; // import the required package/class
public void testNoTokens() throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.KEYWORD_ANALYZER));
    FieldType allFt = getAllFieldType();
    Document doc = new Document();
    doc.add(new Field("_id", "1", StoredField.TYPE));
    doc.add(new AllField("_all", "", 2.0f, allFt));
    indexWriter.addDocument(doc);
    IndexReader reader = DirectoryReader.open(indexWriter);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs docs = searcher.search(new MatchAllDocsQuery(), 10);
    assertThat(docs.totalHits, equalTo(1));
    assertThat(docs.scoreDocs[0].doc, equalTo(0));
}
Example 6: testFieldTypeToTermVectorString
import org.apache.lucene.document.FieldType; // import the required package/class
public void testFieldTypeToTermVectorString() throws Exception {
    FieldType ft = new FieldType();
    ft.setStoreTermVectorOffsets(false);
    ft.setStoreTermVectorPayloads(true);
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorPositions(true);
    String ftOpts = FieldMapper.termVectorOptionsToString(ft);
    assertThat("with_positions_payloads", equalTo(ftOpts));
    AllFieldMapper.Builder builder = new AllFieldMapper.Builder(null);
    boolean exceptionThrown = false;
    try {
        TypeParsers.parseTermVector("", ftOpts, builder);
    } catch (MapperParsingException e) {
        exceptionThrown = true;
    }
    assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", exceptionThrown, equalTo(false));
}
Example 7: createWith
import org.apache.lucene.document.FieldType; // import the required package/class
/**
 * Creates a Lucene Document from two strings: body and title.
 *
 * @return the resulting document
 */
public static Document createWith(final String titleStr, final String bodyStr) {
    final Document document = new Document();
    final FieldType textIndexedType = new FieldType();
    textIndexedType.setStored(true);
    textIndexedType.setIndexOptions(IndexOptions.DOCS);
    textIndexedType.setTokenized(true);
    // index title
    Field title = new Field("title", titleStr, textIndexedType);
    // index body
    Field body = new Field("body", bodyStr, textIndexedType);
    document.add(title);
    document.add(body);
    return document;
}
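A hedged usage sketch for createWith (indexSample is an illustrative name; it assumes a Lucene 5.x+ style IndexWriterConfig that takes only an Analyzer). Note that with IndexOptions.DOCS both fields are searchable but carry no positions, so phrase queries on them will not work:

// Hypothetical usage, not part of the original example.
public static void indexSample(Directory dir) throws IOException {
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
        Document doc = createWith("Lucene in Action", "Lucene is a full-text search library.");
        writer.addDocument(doc);
    }
}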
Example 8: LuceneUtil
import org.apache.lucene.document.FieldType; // import the required package/class
public LuceneUtil(JasperReportsContext jasperReportsContext, boolean isCaseSensitive, boolean isWholeWordsOnly, boolean removeAccents) {
    this.isCaseSensitive = isCaseSensitive;
    this.isWholeWordsOnly = isWholeWordsOnly;
    this.removeAccents = removeAccents;
    this.noneSelector = JRStyledTextAttributeSelector.getNoneSelector(jasperReportsContext);
    this.styledTextUtil = JRStyledTextUtil.getInstance(jasperReportsContext);
    fieldType = new FieldType();
    fieldType.setIndexed(true);
    fieldType.setTokenized(true);
    fieldType.setStored(true);
    fieldType.setStoreTermVectors(true);
    fieldType.setStoreTermVectorPositions(true);
    fieldType.setStoreTermVectorOffsets(true);
    fieldType.freeze();
}
Example 9: newGeoDocument
import org.apache.lucene.document.FieldType; // import the required package/class
protected Document newGeoDocument(OIdentifiable oIdentifiable, Shape shape) {
    FieldType ft = new FieldType();
    ft.setIndexOptions(IndexOptions.DOCS);
    ft.setStored(true);
    Document doc = new Document();
    doc.add(OLuceneIndexType
            .createField(RID, oIdentifiable.getIdentity().toString(), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS));
    for (IndexableField f : strategy.createIndexableFields(shape)) {
        doc.add(f);
    }
    doc.add(new StoredField(strategy.getFieldName(), ctx.toString(shape)));
    return doc;
}
Example 10: index
import org.apache.lucene.document.FieldType; // import the required package/class
public void index(Item item) throws IOException {
    String id = item.getId();
    String text = item.getText();
    long publicationTime = item.getPublicationTime();
    Document document = new Document();
    Field idField = new StringField("id", id, Store.YES);
    document.add(idField);
    FieldType fieldType = new FieldType();
    fieldType.setStored(true);
    fieldType.setIndexed(true);
    fieldType.setStoreTermVectors(true);
    document.add(new Field("text", text, fieldType));
    document.add(new LongField("publicationTime", publicationTime, LongField.TYPE_STORED));
    if (iwriter != null) {
        iwriter.addDocument(document);
    }
}
Example 11: testExcIndexingDocBeforeDocValues
import org.apache.lucene.document.FieldType; // import the required package/class
public void testExcIndexingDocBeforeDocValues() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
    IndexWriter w = new IndexWriter(dir, iwc);
    Document doc = new Document();
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setDocValueType(DocValuesType.SORTED);
    ft.freeze();
    Field field = new Field("test", "value", ft);
    field.setTokenStream(new TokenStream() {
        @Override
        public boolean incrementToken() {
            throw new RuntimeException("no");
        }
    });
    doc.add(field);
    try {
        w.addDocument(doc);
        fail("did not hit exception");
    } catch (RuntimeException re) {
        // expected
    }
    w.addDocument(new Document());
    w.close();
    dir.close();
}
Example 12: make1dmfIndex
import org.apache.lucene.document.FieldType; // import the required package/class
protected void make1dmfIndex(Analyzer analyzer, String... values) throws Exception {
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(
            TEST_VERSION_CURRENT, analyzer).setOpenMode(OpenMode.CREATE));
    Document doc = new Document();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.setStoreTermVectors(true);
    customType.setStoreTermVectorOffsets(true);
    customType.setStoreTermVectorPositions(true);
    for (String value : values) {
        doc.add(new Field(F, value, customType));
    }
    writer.addDocument(doc);
    writer.close();
    if (reader != null) reader.close();
    reader = DirectoryReader.open(dir);
}
Example 13: testMaxThreadPriority
import org.apache.lucene.document.FieldType; // import the required package/class
public void testMaxThreadPriority() throws IOException {
    int pri = Thread.currentThread().getPriority();
    try {
        Directory dir = newDirectory();
        IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()))
                .setMaxBufferedDocs(2)
                .setMergePolicy(newLogMergePolicy());
        ((LogMergePolicy) conf.getMergePolicy()).setMergeFactor(2);
        IndexWriter iw = new IndexWriter(dir, conf);
        Document document = new Document();
        FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
        customType.setStoreTermVectors(true);
        document.add(newField("tvtest", "a b c", customType));
        Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
        for (int i = 0; i < 4; i++) {
            iw.addDocument(document);
        }
        iw.close();
        dir.close();
    } finally {
        Thread.currentThread().setPriority(pri);
    }
}
Example 14: testBasic
import org.apache.lucene.document.FieldType; // import the required package/class
public void testBasic() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
    Field f = newField("foo", "this is a test test", ft);
    doc.add(f);
    for (int i = 0; i < 100; i++) {
        w.addDocument(doc);
    }
    IndexReader reader = w.getReader();
    w.close();
    assertNull(MultiFields.getTermPositionsEnum(reader, null, "foo", new BytesRef("test")));
    DocsEnum de = TestUtil.docs(random(), reader, "foo", new BytesRef("test"), null, null, DocsEnum.FLAG_FREQS);
    while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        assertEquals(2, de.freq());
    }
    reader.close();
    dir.close();
}
Example 15: filter
import org.apache.lucene.document.FieldType; // import the required package/class
@Override
public boolean filter(FullConcept concept) {
    // index
    Document doc = new Document();
    // customized field type for the contents field
    FieldType contentFieldType = new FieldType();
    contentFieldType.setIndexed(true);
    contentFieldType.setStored(true);
    contentFieldType.setStoreTermVectors(true);
    contentFieldType.setTokenized(true);
    doc.add(new Field("contents", concept.getTitle() + "\n" + concept.getPlainContent(), contentFieldType));
    doc.add(new StringField("id", Integer.toString(concept.getId()), Field.Store.YES));
    doc.add(new StringField("outId", concept.getOutId(), Field.Store.YES));
    doc.add(new Field("title", concept.getTitle(), contentFieldType));
    try {
        writer.addDocument(doc);
    } catch (IOException e) {
        e.printStackTrace();
    }
    return true;
}