本文整理汇总了Java中org.apache.lucene.document.DoubleDocValuesField类的典型用法代码示例。如果您正苦于以下问题:Java DoubleDocValuesField类的具体用法?Java DoubleDocValuesField怎么用?Java DoubleDocValuesField使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
DoubleDocValuesField类属于org.apache.lucene.document包,在下文中一共展示了DoubleDocValuesField类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createDocValueFieldTemplate
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/**
* Creates a DocValue field template for a NON-language dependent type
* @param fieldCfg
* @return
*/
@SuppressWarnings("unchecked")
/**
 * Creates a DocValue field template for a NON-language dependent type.
 * The returned field carries a placeholder value of zero that callers are
 * expected to overwrite before indexing.
 * @param fieldCfg the field configuration; may be null
 * @return a zero-valued doc-values field template, or null when fieldCfg is null
 * @throws IllegalArgumentException if the configured type is neither Double nor Float
 */
@SuppressWarnings("unchecked")
public static <FIELD extends Field> FIELD createDocValueFieldTemplate(final IndexDocumentFieldConfig<IndexDocumentValueFieldType> fieldCfg) {
    if (fieldCfg == null) return null;
    IndexDocumentFieldID fieldId = fieldCfg.getId();
    FIELD outField = null;
    switch(fieldCfg.getType()) {
    case Double:
        outField = (FIELD)(new DoubleDocValuesField(fieldId.asString(),0D));
        break;
    case Float:
        outField = (FIELD)(new FloatDocValuesField(fieldId.asString(),0F));
        break;
    default:
        // BUG FIX: the original message was missing the space before "is",
        // producing e.g. "Stringis NOT a supported type"
        throw new IllegalArgumentException(fieldCfg.getType() + " is NOT a supported type");
    }
    return outField;
}
示例2: addToDoc
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/**
 * Adds {@code value} to {@code doc} as a DoubleField; when this field is
 * sorted, a matching DoubleDocValuesField is added as well. A null value
 * is silently skipped.
 */
void addToDoc(Document doc, Double value){
    Preconditions.checkArgument(valueType == Double.class);
    if (value != null) {
        Store storeMode = stored ? Store.YES : Store.NO;
        doc.add(new DoubleField(indexFieldName, value, storeMode));
        if (isSorted()) {
            Preconditions.checkArgument(sortedValueType == SearchFieldSorting.FieldType.DOUBLE);
            doc.add(new DoubleDocValuesField(indexFieldName, value));
        }
    }
}
示例3: testBasicDouble
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Indexes 100 docs with a double doc-values field and checks range facet counts. */
public void testBasicDouble() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    DoubleDocValuesField dvField = new DoubleDocValuesField("field", 0.0);
    doc.add(dvField);
    // reuse the same Document instance, only changing the field value
    for (long docValue = 0; docValue < 100; docValue++) {
        dvField.setDoubleValue(docValue);
        writer.addDocument(doc);
    }
    IndexReader reader = writer.getReader();
    FacetsCollector collector = new FacetsCollector();
    IndexSearcher searcher = newSearcher(reader);
    searcher.search(new MatchAllDocsQuery(), collector);
    Facets facets = new DoubleRangeFacetCounts("field", collector,
        new DoubleRange("less than 10", 0.0, true, 10.0, false),
        new DoubleRange("less than or equal to 10", 0.0, true, 10.0, true),
        new DoubleRange("over 90", 90.0, false, 100.0, false),
        new DoubleRange("90 or above", 90.0, true, 100.0, false),
        new DoubleRange("over 1000", 1000.0, false, Double.POSITIVE_INFINITY, false));
    assertEquals("dim=field path=[] value=21 childCount=5\n less than 10 (10)\n less than or equal to 10 (11)\n over 90 (9)\n 90 or above (10)\n over 1000 (0)\n",
        facets.getTopChildren(10, "field").toString());
    IOUtils.close(writer, reader, dir);
}
示例4: testDouble
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Tests sorting on type double */
public void testDouble() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoubleDocValuesField("value", 30.1));
doc.add(newStringField("value", "30.1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", -1.3));
doc.add(newStringField("value", "-1.3", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// numeric order
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value"));
assertNoFieldCaches();
ir.close();
dir.close();
}
示例5: testDoubleSignedZero
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Tests sorting on type double with +/- zero */
/** Tests sorting on type double with +/- zero: -0 must sort before +0. */
public void testDoubleSignedZero() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new DoubleDocValuesField("value", +0D));
    doc.add(newStringField("value", "+0", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new DoubleDocValuesField("value", -0D));
    doc.add(newStringField("value", "-0", Field.Store.YES));
    writer.addDocument(doc);
    // FIX: removed a dead `doc = new Document();` here — the fresh Document was
    // never added to the index, so the assignment had no effect
    IndexReader ir = writer.getReader();
    writer.close();
    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // numeric order
    assertEquals("-0", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("+0", searcher.doc(td.scoreDocs[1].doc).get("value"));
    ir.close();
    dir.close();
}
示例6: testDoubleReverse
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Tests sorting on type double in reverse */
public void testDoubleReverse() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoubleDocValuesField("value", 30.1));
doc.add(newStringField("value", "30.1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", -1.3));
doc.add(newStringField("value", "-1.3", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// numeric order
assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
assertNoFieldCaches();
ir.close();
dir.close();
}
示例7: testDoubleMissing
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Tests sorting on type double with a missing value */
public void testDoubleMissing() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", -1.3));
doc.add(newStringField("value", "-1.3", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// null treated as a 0
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value"));
ir.close();
dir.close();
}
示例8: testDoubleMissingLast
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Tests sorting on type double, specifying the missing value should be treated as Double.MAX_VALUE */
/** Tests sorting on type double, specifying the missing value should be treated as Double.MAX_VALUE */
public void testDoubleMissingLast() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    // first document has no "value" field at all
    writer.addDocument(new Document());
    double[] numericValues = { -1.3, 4.2333333333333, 4.2333333333332 };
    String[] storedValues = { "-1.3", "4.2333333333333", "4.2333333333332" };
    for (int i = 0; i < numericValues.length; i++) {
        Document doc = new Document();
        doc.add(new DoubleDocValuesField("value", numericValues[i]));
        doc.add(newStringField("value", storedValues[i], Field.Store.YES));
        writer.addDocument(doc);
    }
    IndexReader ir = writer.getReader();
    writer.close();
    IndexSearcher searcher = newSearcher(ir);
    SortField sortField = new SortField("value", SortField.Type.DOUBLE);
    sortField.setMissingValue(Double.MAX_VALUE);
    Sort sort = new Sort(sortField);
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(4, td.totalHits);
    // null treated as Double.MAX_VALUE
    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
    assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
    ir.close();
    dir.close();
}
示例9: testDouble
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Tests sorting on type double */
public void testDouble() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoubleDocValuesField("value", 30.1));
doc.add(newStringField("value", "30.1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", -1.3));
doc.add(newStringField("value", "-1.3", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// numeric order
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value"));
assertNoFieldCaches();
ir.close();
dir.close();
}
示例10: testDoubleReverse
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Tests sorting on type double in reverse */
public void testDoubleReverse() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoubleDocValuesField("value", 30.1));
doc.add(newStringField("value", "30.1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", -1.3));
doc.add(newStringField("value", "-1.3", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.close();
IndexSearcher searcher = new IndexSearcher(ir);
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(4, td.totalHits);
// numeric order
assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
assertNoFieldCaches();
ir.close();
dir.close();
}
示例11: testBasicDouble
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/** Indexes 100 docs with a double doc-values field and checks range facet counts (RangeAccumulator API). */
public void testBasicDouble() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    DoubleDocValuesField dvField = new DoubleDocValuesField("field", 0.0);
    doc.add(dvField);
    // reuse the same Document instance, only changing the field value
    for (long docValue = 0; docValue < 100; docValue++) {
        dvField.setDoubleValue(docValue);
        writer.addDocument(doc);
    }
    IndexReader reader = writer.getReader();
    writer.close();
    RangeAccumulator accumulator = new RangeAccumulator(new RangeFacetRequest<DoubleRange>("field",
        new DoubleRange("less than 10", 0.0, true, 10.0, false),
        new DoubleRange("less than or equal to 10", 0.0, true, 10.0, true),
        new DoubleRange("over 90", 90.0, false, 100.0, false),
        new DoubleRange("90 or above", 90.0, true, 100.0, false),
        new DoubleRange("over 1000", 1000.0, false, Double.POSITIVE_INFINITY, false)));
    FacetsCollector collector = FacetsCollector.create(accumulator);
    IndexSearcher searcher = newSearcher(reader);
    searcher.search(new MatchAllDocsQuery(), collector);
    List<FacetResult> results = collector.getFacetResults();
    assertEquals(1, results.size());
    assertEquals("field (0)\n less than 10 (10)\n less than or equal to 10 (11)\n over 90 (9)\n 90 or above (10)\n over 1000 (0)\n", FacetTestUtils.toSimpleString(results.get(0)));
    reader.close();
    dir.close();
}
示例12: addDoc
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/**
 * Adds a single document exercising a wide variety of field types
 * (term vectors, trie-numeric fields, and every doc-values flavor),
 * keyed by {@code id}. Used to verify that older index formats are
 * preserved across versions.
 * @param writer the index writer to add the document to
 * @param id per-document integer, also embedded in most field values
 * @throws IOException if the writer fails to add the document
 */
private void addDoc(IndexWriter writer, int id) throws IOException
{
Document doc = new Document();
doc.add(new TextField("content", "aaa", Field.Store.NO));
doc.add(new StringField("id", Integer.toString(id), Field.Store.YES));
// text field with full term vectors (positions + offsets)
FieldType customType2 = new FieldType(TextField.TYPE_STORED);
customType2.setStoreTermVectors(true);
customType2.setStoreTermVectorPositions(true);
customType2.setStoreTermVectorOffsets(true);
// surrogate pairs and a NUL byte to exercise UTF-8 handling
doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", customType2));
doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", customType2));
doc.add(new Field("content2", "here is more content with aaa aaa aaa", customType2));
doc.add(new Field("fie\u2C77ld", "field with non-ascii name", customType2));
// add numeric fields, to test if flex preserves encoding
doc.add(new IntField("trieInt", id, Field.Store.NO));
doc.add(new LongField("trieLong", (long) id, Field.Store.NO));
// add docvalues fields
doc.add(new NumericDocValuesField("dvByte", (byte) id));
// big-endian byte encoding of id, shared by all the binary/sorted fields below
byte bytes[] = new byte[] {
(byte)(id >>> 24), (byte)(id >>> 16),(byte)(id >>> 8),(byte)id
};
BytesRef ref = new BytesRef(bytes);
doc.add(new BinaryDocValuesField("dvBytesDerefFixed", ref));
doc.add(new BinaryDocValuesField("dvBytesDerefVar", ref));
doc.add(new SortedDocValuesField("dvBytesSortedFixed", ref));
doc.add(new SortedDocValuesField("dvBytesSortedVar", ref));
doc.add(new BinaryDocValuesField("dvBytesStraightFixed", ref));
doc.add(new BinaryDocValuesField("dvBytesStraightVar", ref));
doc.add(new DoubleDocValuesField("dvDouble", (double)id));
doc.add(new FloatDocValuesField("dvFloat", (float)id));
doc.add(new NumericDocValuesField("dvInt", id));
doc.add(new NumericDocValuesField("dvLong", id));
doc.add(new NumericDocValuesField("dvPacked", id));
doc.add(new NumericDocValuesField("dvShort", (short)id));
// a field with both offsets and term vectors for a cross-check
FieldType customType3 = new FieldType(TextField.TYPE_STORED);
customType3.setStoreTermVectors(true);
customType3.setStoreTermVectorPositions(true);
customType3.setStoreTermVectorOffsets(true);
customType3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
doc.add(new Field("content5", "here is more content with aaa aaa aaa", customType3));
// a field that omits only positions
FieldType customType4 = new FieldType(TextField.TYPE_STORED);
customType4.setStoreTermVectors(true);
customType4.setStoreTermVectorPositions(false);
customType4.setStoreTermVectorOffsets(true);
customType4.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
doc.add(new Field("content6", "here is more content with aaa aaa aaa", customType4));
// TODO:
// index different norms types via similarity (we use a random one currently?!)
// remove any analyzer randomness, explicitly add payloads for certain fields.
writer.addDocument(doc);
}
示例13: addDoc
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/**
 * Adds a single document exercising a wide variety of field types,
 * keyed by {@code id}. Like the sibling variant, but additionally adds
 * SortedSet and SortedNumeric doc-values fields.
 * @param writer the index writer to add the document to
 * @param id per-document integer, also embedded in most field values
 * @throws IOException if the writer fails to add the document
 */
private void addDoc(IndexWriter writer, int id) throws IOException
{
Document doc = new Document();
doc.add(new TextField("content", "aaa", Field.Store.NO));
doc.add(new StringField("id", Integer.toString(id), Field.Store.YES));
// text field with full term vectors (positions + offsets)
FieldType customType2 = new FieldType(TextField.TYPE_STORED);
customType2.setStoreTermVectors(true);
customType2.setStoreTermVectorPositions(true);
customType2.setStoreTermVectorOffsets(true);
// surrogate pairs and a NUL byte to exercise UTF-8 handling
doc.add(new Field("autf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", customType2));
doc.add(new Field("utf8", "Lu\uD834\uDD1Ece\uD834\uDD60ne \u0000 \u2620 ab\ud917\udc17cd", customType2));
doc.add(new Field("content2", "here is more content with aaa aaa aaa", customType2));
doc.add(new Field("fie\u2C77ld", "field with non-ascii name", customType2));
// add numeric fields, to test if flex preserves encoding
doc.add(new IntField("trieInt", id, Field.Store.NO));
doc.add(new LongField("trieLong", (long) id, Field.Store.NO));
// add docvalues fields
doc.add(new NumericDocValuesField("dvByte", (byte) id));
// big-endian byte encoding of id, shared by all the binary/sorted fields below
byte bytes[] = new byte[] {
(byte)(id >>> 24), (byte)(id >>> 16),(byte)(id >>> 8),(byte)id
};
BytesRef ref = new BytesRef(bytes);
doc.add(new BinaryDocValuesField("dvBytesDerefFixed", ref));
doc.add(new BinaryDocValuesField("dvBytesDerefVar", ref));
doc.add(new SortedDocValuesField("dvBytesSortedFixed", ref));
doc.add(new SortedDocValuesField("dvBytesSortedVar", ref));
doc.add(new BinaryDocValuesField("dvBytesStraightFixed", ref));
doc.add(new BinaryDocValuesField("dvBytesStraightVar", ref));
doc.add(new DoubleDocValuesField("dvDouble", (double)id));
doc.add(new FloatDocValuesField("dvFloat", (float)id));
doc.add(new NumericDocValuesField("dvInt", id));
doc.add(new NumericDocValuesField("dvLong", id));
doc.add(new NumericDocValuesField("dvPacked", id));
doc.add(new NumericDocValuesField("dvShort", (short)id));
doc.add(new SortedSetDocValuesField("dvSortedSet", ref));
doc.add(new SortedNumericDocValuesField("dvSortedNumeric", id));
// a field with both offsets and term vectors for a cross-check
FieldType customType3 = new FieldType(TextField.TYPE_STORED);
customType3.setStoreTermVectors(true);
customType3.setStoreTermVectorPositions(true);
customType3.setStoreTermVectorOffsets(true);
customType3.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
doc.add(new Field("content5", "here is more content with aaa aaa aaa", customType3));
// a field that omits only positions
FieldType customType4 = new FieldType(TextField.TYPE_STORED);
customType4.setStoreTermVectors(true);
customType4.setStoreTermVectorPositions(false);
customType4.setStoreTermVectorOffsets(true);
customType4.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
doc.add(new Field("content6", "here is more content with aaa aaa aaa", customType4));
// TODO:
// index different norms types via similarity (we use a random one currently?!)
// remove any analyzer randomness, explicitly add payloads for certain fields.
writer.addDocument(doc);
}
示例14: indexDocs
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/**
 * Recursively indexes {@code file}: directories are walked, regular files
 * become one document each with path, name, modification time, tokenized
 * contents, and a per-operation boost stored as a double doc-values field.
 * Unreadable or unopenable files are silently skipped (best-effort crawl).
 * @param writer the index writer (its open mode decides add vs. update)
 * @param file a file or directory to index
 * @throws IOException on indexing failure (not on unreadable input files)
 */
private void indexDocs(IndexWriter writer, File file) throws IOException {
// do not try to index files that cannot be read
if (file.canRead()) {
if (file.isDirectory()) {
String[] files = file.list();
// list() can return null on I/O error; skip the directory in that case
if (files != null) {
for (int i = 0; i < files.length; i++) {
indexDocs(writer, new File(file, files[i]));
}
}
} else { // it is a file
FileInputStream fis;
try {
fis = new FileInputStream(file);
} catch (FileNotFoundException fnfe) {
// at least on windows, some temporary files raise this exception with an "access denied" message
// checking if the file can be read doesn't help
return;
}
try {
// make a new, empty document
Document doc = new Document();
// Add the path of the file as a field named "path". Use a field that is indexed (i.e. searchable),
// but don't tokenize the field into separate words and don't index term frequency or positional information:
// strip angle brackets so the name matches the keys in the popularity config
String op_name = file.getName().replaceAll("<", "").replaceAll(">", "");
doc.add(new StringField("path", file.getPath(), Field.Store.YES));
doc.add(new StringField("op_name", op_name, Field.Store.YES));
// Add the last modified date of the file a field named "modified".
// Use a LongField that is indexed (i.e. efficiently filterable with NumericRangeFilter). This indexes to millisecond resolution, which
// is often too fine. You could instead create a number based on year/month/day/hour/minutes/seconds, down the resolution you require.
// For example the long value 2011021714 would mean February 17, 2011, 2-3 PM.
doc.add(new LongField("modified", file.lastModified(), Field.Store.NO));
// Add the contents of the file to a field named "contents".
// Specify a Reader, so that the text of the file is tokenized and indexed, but not stored.
// Note that FileReader expects the file to be in UTF-8 encoding.
// If that's not the case searching for special characters will fail.
doc.add(new TextField("op_desc", new BufferedReader(new InputStreamReader(fis, "UTF-8"))));
// look up the boost by exact name first, then case-insensitively; fall back to the default
double boost = default_doc_boost;
if (m_oConfigPopularities.containsKey(op_name)) {
boost = m_oConfigPopularities.get(op_name);
} else if (m_oConfigPopularities.containsKey(op_name.toLowerCase())) {
boost = m_oConfigPopularities.get(op_name.toLowerCase());
}
doc.add(new DoubleDocValuesField("boost", boost));
if (writer.getConfig().getOpenMode() == IndexWriterConfig.OpenMode.CREATE) {
// New index, so we just add the document (no old document can be there):
writer.addDocument(doc);
} else {
// Existing index (an old copy of this document may have been indexed)
// so we use updateDocument instead to replace the old one matching the exact path, if present:
System.out.println("updating " + file);
writer.updateDocument(new Term("path", file.getPath()), doc);
}
} finally {
// always release the stream, even if indexing threw
fis.close();
}
}
}
}
示例15: buildDoc
import org.apache.lucene.document.DoubleDocValuesField; //导入依赖的package包/类
/**
 * Builds a Lucene Document from a car-listing JSON object: numeric
 * doc-values for id/price/year/mileage, tokenized contents, sorted-set
 * tags with an indexed and payload-carrying copy, and the raw JSON
 * stored as binary doc-values for retrieval.
 * @param json source record; "id" is required, other fields are optional
 * @return the populated document
 * @throws Exception if "id" is missing or tokenization fails
 */
static Document buildDoc(JSONObject json) throws Exception{
Document doc = new Document();
doc.add(new NumericDocValuesField("id", json.getLong("id")));
// optDouble returns NaN when "price" is absent — presumably acceptable downstream; verify
doc.add(new DoubleDocValuesField("price", json.optDouble("price")));
doc.add(new TextField("contents", json.optString("contents"), Store.NO));
doc.add(new NumericDocValuesField("year", json.optInt("year")));
doc.add(new NumericDocValuesField("mileage", json.optInt("mileage")));
addMetaString(doc,"color", json.optString("color"));
addMetaString(doc,"category", json.optString("category"));
addMetaString(doc,"makemodel", json.optString("makemodel"));
addMetaString(doc,"city", json.optString("city"));
String tagsString = json.optString("tags");
// NOTE(review): optString defaults to "" rather than null, so this guard likely
// always passes and a missing "tags" key indexes a single empty tag — confirm intent
if (tagsString != null) {
String[] parts = tagsString.split(",");
if (parts.length > 0) {
for (String part : parts) {
// each tag is both a sorted-set doc-value and an indexed term
doc.add(new SortedSetDocValuesField("tags", new BytesRef(part)));
doc.add(new StringField("tags_indexed", part, Store.NO));
}
}
// store everything
FieldType ft = new FieldType();
ft.setOmitNorms(false);
ft.setTokenized(true);
ft.setStoreTermVectors(true);
ft.setStoreTermVectorOffsets(true);
ft.setStoreTermVectorPayloads(true);
ft.setStoreTermVectorPositions(true);
ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
// tags with payloads, term vectors included for highlighting/scoring use
Field tagPayload = new Field("tags_payload", new PayloadTokenizer(tagsString), ft);
doc.add(tagPayload);
}
// keep the full source record so it can be returned without stored fields
doc.add(new BinaryDocValuesField("json", new BytesRef(json.toString())));
return doc;
}