当前位置: 首页>>代码示例>>Java>>正文


Java NumericDocValuesField类代码示例

本文整理汇总了Java中org.apache.lucene.document.NumericDocValuesField的典型用法代码示例。如果您正苦于以下问题:Java NumericDocValuesField类的具体用法?Java NumericDocValuesField怎么用?Java NumericDocValuesField使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


NumericDocValuesField类属于org.apache.lucene.document包,在下文中一共展示了NumericDocValuesField类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getIndex

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Creates an {@link Engine.Index} operation for the supplied id, populating the
 * uid, version and sequence-number fields the engine requires, plus a minimal
 * one-byte source.
 */
private Engine.Index getIndex(final String id) {
    final String type = "test";
    // fields shared between the Lucene document and the ParsedDocument
    final Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE);
    final Field versionField = new NumericDocValuesField("_version", Versions.MATCH_ANY);
    final SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID();
    final ParseContext.Document luceneDoc = new ParseContext.Document();
    luceneDoc.add(new TextField("test", "test", Field.Store.YES));
    luceneDoc.add(uidField);
    luceneDoc.add(versionField);
    luceneDoc.add(seqID.seqNo);
    luceneDoc.add(seqID.seqNoDocValue);
    luceneDoc.add(seqID.primaryTerm);
    final BytesReference sourceBytes = new BytesArray(new byte[] { 1 });
    final ParsedDocument parsedDoc = new ParsedDocument(versionField, seqID, id, type, null,
        Arrays.asList(luceneDoc), sourceBytes, XContentType.JSON, null);
    return new Engine.Index(new Term("_uid", parsedDoc.uid()), parsedDoc);
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:18,代码来源:RecoverySourceHandlerTests.java

示例2: testNumericDocValues

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Sums a numeric doc-values field over 16 documents whose values alternate
 * between 1 and 2 (eight of each), expecting a total of 8*1 + 8*2 = 24.
 */
public void testNumericDocValues() throws IOException {
    testCase(new MatchAllDocsQuery(), iw -> {
        // Index 8 pairs of documents with values 1 and 2; replaces the
        // original's 16 hand-copied addDocument calls with a loop that
        // indexes the exact same sequence of documents.
        for (int i = 0; i < 8; i++) {
            iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 1)));
            iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 2)));
        }
    }, count -> assertEquals(24L, count.getValue(), 0d));
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:21,代码来源:SumAggregatorTests.java

示例3: testSlowLogParsedDocumentPrinterSourceToLog

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Verifies how {@code SlowLogParsedDocumentPrinter} renders the document
 * source at different {@code maxSourceCharsToLog} settings: 0 suppresses it,
 * {@code Integer.MAX_VALUE} logs it whole, and a small limit truncates it.
 */
public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException {
    BytesReference source = JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject().bytes();
    ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceID.emptySeqID(), "id",
            "test", null, null, source, XContentType.JSON, null);
    Index index = new Index("foo", "123");
    // Turning off document logging doesn't log source[]
    SlowLogParsedDocumentPrinter p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 0);
    assertThat(p.toString(), not(containsString("source[")));

    // Turning on document logging logs the whole thing
    p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, Integer.MAX_VALUE);
    assertThat(p.toString(), containsString("source[{\"foo\":\"bar\"}]"));

    // And you can truncate the source. (The original constructed and checked
    // this identical printer twice; the redundant duplicate is merged here.)
    p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3);
    assertThat(p.toString(), containsString("source[{\"f]"));
    assertThat(p.toString(), startsWith("[foo/123] took"));
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:23,代码来源:IndexingSlowLogTests.java

示例4: testSimple

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Verifies that {@code PerThreadIDAndVersionLookup} resolves the version and
 * docId for an indexed uid, misses an unknown uid, and honors the live-docs
 * bits for deleted documents.
 */
public void testSimple() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);
    LeafReaderContext leaf = reader.leaves().get(0);
    PerThreadIDAndVersionLookup lookup = new PerThreadIDAndVersionLookup(leaf.reader());
    // uid "6" is present: expect its version and docId back
    DocIdAndVersion hit = lookup.lookup(new BytesRef("6"), null, leaf);
    assertNotNull(hit);
    assertEquals(87, hit.version);
    assertEquals(0, hit.docId);
    // uid "7" was never indexed
    assertNull(lookup.lookup(new BytesRef("7"), null, leaf));
    // a live-docs bitset that marks everything dead hides uid "6"
    assertNull(lookup.lookup(new BytesRef("6"), new Bits.MatchNoBits(1), leaf));
    reader.close();
    writer.close();
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:27,代码来源:VersionLookupTests.java

示例5: testCache

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/** Test that the version lookup cache fills on first use, is reused on
 *  subsequent lookups, and is evicted when the reader closes. */
public void testCache() throws Exception {
    int baseline = Versions.lookupStates.size();

    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);

    Term uid = new Term(UidFieldMapper.NAME, "6");
    // first lookup populates one per-core cache entry
    assertEquals(87, Versions.loadVersion(reader, uid));
    assertEquals(baseline + 1, Versions.lookupStates.size());
    // second lookup hits the cache without growing the map
    assertEquals(87, Versions.loadVersion(reader, uid));
    assertEquals(baseline + 1, Versions.lookupStates.size());

    reader.close();
    writer.close();
    // closing the reader evicts the core's entry from the map
    assertEquals(baseline, Versions.lookupStates.size());
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:25,代码来源:VersionsTests.java

示例6: testCacheFilterReader

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/** Test that wrapping a reader does not duplicate the version cache entry,
 *  because wrapper and delegate share the same core cache key. */
public void testCacheFilterReader() throws Exception {
    int baseline = Versions.lookupStates.size();

    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "6", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 87));
    writer.addDocument(doc);
    DirectoryReader reader = DirectoryReader.open(writer);

    Term uid = new Term(UidFieldMapper.NAME, "6");
    assertEquals(87, Versions.loadVersion(reader, uid));
    assertEquals(baseline + 1, Versions.lookupStates.size());

    // the wrapped reader must reuse the delegate's cache entry
    DirectoryReader wrapped = ElasticsearchDirectoryReader.wrap(reader, new ShardId("bogus", "_na_", 5));
    assertEquals(87, Versions.loadVersion(wrapped, uid));
    assertEquals(baseline + 1, Versions.lookupStates.size());

    reader.close();
    writer.close();
    // closing the underlying reader evicts the shared entry
    assertEquals(baseline, Versions.lookupStates.size());
    dir.close();
}
 
开发者ID:justor,项目名称:elasticsearch_my,代码行数:26,代码来源:VersionsTests.java

示例7: transform

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Converts an {@code Example} into a Lucene {@link Document}, adding each
 * value as an indexed/stored field plus, where needed, a doc-values or facet
 * companion field under the same name.
 */
@Override
public Document transform(final Example input) throws TransformException {
    final Document document = new Document();

    // id: stored for retrieval, sorted doc values for sorting/grouping
    final String id = input.getId();
    document.add(new Field(ExampleField.ID.getName(), id, StringField.TYPE_STORED));
    document.add(new SortedDocValuesField(ExampleField.ID.getName(), new BytesRef(id)));

    // free-text fields
    document.add(new Field(ExampleField.TITLE.getName(), input.getTitle(), TextField.TYPE_STORED));
    document.add(new Field(ExampleField.BODY.getName(), input.getBody(), TextField.TYPE_STORED));

    // color: exact-match string field plus a faceting entry
    final String color = input.getColor();
    document.add(new Field(ExampleField.COLOR.getName(), color, StringField.TYPE_STORED));
    document.add(new SortedSetDocValuesFacetField(ExampleField.COLOR.getName(), color));

    // creation date: numeric doc values for range/sort plus a stored copy
    final long createMillis = input.getCreateDate().getTime();
    document.add(new NumericDocValuesField(ExampleField.CREATE_DATE.getName(), createMillis));
    document.add(new StoredField(ExampleField.CREATE_DATE.getName(), createMillis));

    return document;
}
 
开发者ID:bbende,项目名称:tripod,代码行数:20,代码来源:ExampleIndexTransformer.java

示例8: index

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/** Build the example index: two documents, each with a text field, a numeric
 *  doc-values "popularity" field, and a facet label under dimension "A". */
private void index() throws IOException {
  IndexWriter indexWriter = new IndexWriter(indexDir,
      new IndexWriterConfig(FacetExamples.EXAMPLES_VER, new WhitespaceAnalyzer()));
  // Facet ordinals are written to their own taxonomy directory, separate
  // from the main index.
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);

  Document first = new Document();
  first.add(new TextField("c", "foo bar", Store.NO));
  first.add(new NumericDocValuesField("popularity", 5L));
  first.add(new FacetField("A", "B"));
  indexWriter.addDocument(config.build(taxoWriter, first));

  Document second = new Document();
  second.add(new TextField("c", "foo foo bar", Store.NO));
  second.add(new NumericDocValuesField("popularity", 3L));
  second.add(new FacetField("A", "C"));
  indexWriter.addDocument(config.build(taxoWriter, second));

  indexWriter.close();
  taxoWriter.close();
}
 
开发者ID:europeana,项目名称:search,代码行数:24,代码来源:ExpressionAggregationFacetsExample.java

示例9: index

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/** Build the example index: 100 documents stamped i*1000 seconds before
 *  "now", carrying the timestamp both as doc values (for range facets) and
 *  as a numeric field (for drill-down). */
public void index() throws IOException {
  IndexWriter indexWriter = new IndexWriter(indexDir,
      new IndexWriterConfig(FacetExamples.EXAMPLES_VER, new WhitespaceAnalyzer()));

  for (int i = 0; i < 100; i++) {
    long then = nowSec - i * 1000;
    Document doc = new Document();
    // doc-values copy drives range facet computation
    doc.add(new NumericDocValuesField("timestamp", then));
    // numeric copy enables drill-down queries
    doc.add(new LongField("timestamp", then, Field.Store.NO));
    indexWriter.addDocument(doc);
  }

  // Open a near-real-time searcher over the writer before closing it.
  searcher = new IndexSearcher(DirectoryReader.open(indexWriter, true));
  indexWriter.close();
}
 
开发者ID:europeana,项目名称:search,代码行数:22,代码来源:RangeFacetsExample.java

示例10: setUp

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Indexes a random number (2049-4000) of documents covering text, string,
 * numeric, and doc-values field types, then opens a reader and searcher
 * over the result.
 */
@Override
public void setUp() throws Exception {
  super.setUp();
  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  int numDocs = TestUtil.nextInt(random(), 2049, 4000);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    // analyzed text fields
    doc.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
    doc.add(newTextField("oddeven", i % 2 == 0 ? "even" : "odd", Field.Store.NO));
    // string-encoded narrow integer values
    doc.add(newStringField("byte", String.valueOf((byte) random().nextInt()), Field.Store.NO));
    doc.add(newStringField("short", String.valueOf((short) random().nextInt()), Field.Store.NO));
    // true numeric fields
    doc.add(new IntField("int", random().nextInt(), Field.Store.NO));
    doc.add(new LongField("long", random().nextLong(), Field.Store.NO));
    doc.add(new FloatField("float", random().nextFloat(), Field.Store.NO));
    doc.add(new DoubleField("double", random().nextDouble(), Field.Store.NO));
    // doc-values fields used for expression-based sorting
    doc.add(new NumericDocValuesField("intdocvalues", random().nextInt()));
    doc.add(new FloatDocValuesField("floatdocvalues", random().nextFloat()));
    iw.addDocument(doc);
  }
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
}
 
开发者ID:europeana,项目名称:search,代码行数:27,代码来源:TestExpressionSorts.java

示例11: buildDocument

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Builds the suggester's index document for one suggestion entry: the text in
 * searchable, exact, and doc-values forms, the weight as numeric doc values,
 * and optional payload and context fields.
 */
private Document buildDocument(BytesRef text, Set<BytesRef> contexts, long weight, BytesRef payload) throws IOException {
  String textString = text.utf8ToString();
  FieldType ft = getTextFieldType();
  Document doc = new Document();
  // searchable forms of the suggestion text
  doc.add(new Field(TEXT_FIELD_NAME, textString, ft));
  doc.add(new Field("textgrams", textString, ft));
  doc.add(new StringField(EXACT_TEXT_FIELD_NAME, textString, Field.Store.NO));
  // raw bytes and weight as doc values for retrieval and ranking
  doc.add(new BinaryDocValuesField(TEXT_FIELD_NAME, text));
  doc.add(new NumericDocValuesField("weight", weight));
  if (payload != null) {
    doc.add(new BinaryDocValuesField("payloads", payload));
  }
  if (contexts != null) {
    for (BytesRef context : contexts) {
      // TODO: if we had a BinaryTermField we could fix
      // this "must be valid ut8f" limitation:
      doc.add(new StringField(CONTEXTS_FIELD_NAME, context.utf8ToString(), Field.Store.NO));
      doc.add(new SortedSetDocValuesField(CONTEXTS_FIELD_NAME, context));
    }
  }
  return doc;
}
 
开发者ID:europeana,项目名称:search,代码行数:23,代码来源:AnalyzingInfixSuggester.java

示例12: generateIndexDocuments

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Generates {@code ndocs} synthetic documents keyed by their text-field value,
 * each carrying a payload, three weight doc-values fields, and a random number
 * of context values.
 *
 * @param ndocs number of documents to generate
 * @return map from the document's text-field value to the document
 */
private Map<String, Document> generateIndexDocuments(int ndocs) {
  Map<String, Document> docs = new HashMap<>();
  for(int i = 0; i < ndocs ; i++) {
    Field field = new TextField(FIELD_NAME, "field_" + i, Field.Store.YES);
    Field payload = new StoredField(PAYLOAD_FIELD_NAME, new BytesRef("payload_" + i));
    Field weight1 = new NumericDocValuesField(WEIGHT_FIELD_NAME_1, 10 + i);
    Field weight2 = new NumericDocValuesField(WEIGHT_FIELD_NAME_2, 20 + i);
    Field weight3 = new NumericDocValuesField(WEIGHT_FIELD_NAME_3, 30 + i);
    Field contexts = new StoredField(CONTEXTS_FIELD_NAME, new BytesRef("ctx_"  + i + "_0"));
    Document doc = new Document();
    doc.add(field);
    doc.add(payload);
    doc.add(weight1);
    doc.add(weight2);
    doc.add(weight3);
    doc.add(contexts);
    // Evaluate the randomized bound once: the original called atLeast(3) in
    // the loop condition, re-rolling the bound on every iteration.
    int numContexts = atLeast(3);
    for(int j = 1; j < numContexts; j++) {
      // NOTE(review): the same Field instance is mutated and re-added; whether
      // each occurrence keeps its own value depends on when StoredField values
      // are consumed at addDocument time — confirm against the consumer.
      contexts.setBytesValue(new BytesRef("ctx_" + i + "_" + j));
      doc.add(contexts);
    }
    docs.put(field.stringValue(), doc);
  }
  return docs;
}
 
开发者ID:europeana,项目名称:search,代码行数:25,代码来源:DocumentValueSourceDictionaryTest.java

示例13: testNoScore

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Facet aggregation via {@code TaxonomyFacetSumValueSource} over a numeric
 * doc-values "price" field: four docs priced 1..4 split across two labels of
 * dimension "a", so label "1" sums to 2+4=6 and label "0" to 1+3=4.
 */
public void testNoScore() throws Exception {
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();

  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  IndexWriter iw = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
  FacetsConfig config = new FacetsConfig();
  for (int i = 0; i < 4; i++) {
    Document doc = new Document();
    doc.add(new NumericDocValuesField("price", i + 1));
    doc.add(new FacetField("a", Integer.toString(i % 2)));
    iw.addDocument(config.build(taxoWriter, doc));
  }

  DirectoryReader r = DirectoryReader.open(iw, true);
  DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoWriter);

  FacetsCollector sfc = new FacetsCollector();
  newSearcher(r).search(new MatchAllDocsQuery(), sfc);
  Facets facets = new TaxonomyFacetSumValueSource(taxoReader, config, sfc, new LongFieldSource("price"));
  assertEquals("dim=a path=[] value=10.0 childCount=2\n  1 (6.0)\n  0 (4.0)\n",
      facets.getTopChildren(10, "a").toString());

  IOUtils.close(taxoWriter, iw, taxoReader, taxoDir, r, indexDir);
}
 
开发者ID:europeana,项目名称:search,代码行数:25,代码来源:TestTaxonomyFacetSumValueSource.java

示例14: testDateCompression

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Checks that date-like longs (a fixed base plus random whole-day multiples)
 * compress well: 50 additional documents must cost less than 50 fully packed
 * values, because the codec can factor out the common day stride.
 */
public void testDateCompression() throws IOException {
  final Directory dir = new RAMDirectory();
  final IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  final IndexWriter iwriter = new IndexWriter(dir, iwc);

  final long base = 13; // prime
  final long day = 1000L * 60 * 60 * 24;

  final Document doc = new Document();
  final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
  doc.add(dvf);
  for (int i = 0; i < 300; ++i) {
    dvf.setLongValue(base + random().nextInt(1000) * day);
    iwriter.addDocument(doc);
  }
  iwriter.forceMerge(1);
  final long size1 = dirSize(dir);
  for (int i = 0; i < 50; ++i) {
    dvf.setLongValue(base + random().nextInt(1000) * day);
    iwriter.addDocument(doc);
  }
  iwriter.forceMerge(1);
  final long size2 = dirSize(dir);
  // make sure the new longs costed less than if they had only been packed
  assertTrue(size2 < size1 + (PackedInts.bitsRequired(day) * 50) / 8);
  // release resources: the original leaked both the writer and the directory
  iwriter.close();
  dir.close();
}
 
开发者ID:europeana,项目名称:search,代码行数:27,代码来源:BaseCompressingDocValuesFormatTestCase.java

示例15: testSingleBigValueCompression

import org.apache.lucene.document.NumericDocValuesField; //导入依赖的package包/类
/**
 * Checks that one outlier value ({@code Long.MAX_VALUE}) among 20001 small
 * values does not inflate the bits-per-value of every other document: the
 * index must grow by less than 53 extra bits for each of the 20000 docs.
 */
public void testSingleBigValueCompression() throws IOException {
  final Directory dir = new RAMDirectory();
  final IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  final IndexWriter iwriter = new IndexWriter(dir, iwc);

  final Document doc = new Document();
  final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
  doc.add(dvf);
  for (int i = 0; i < 20000; ++i) {
    dvf.setLongValue(i & 1023); // values fit in 10 bits
    iwriter.addDocument(doc);
  }
  iwriter.forceMerge(1);
  final long size1 = dirSize(dir);
  dvf.setLongValue(Long.MAX_VALUE);
  iwriter.addDocument(doc);
  iwriter.forceMerge(1);
  final long size2 = dirSize(dir);
  // make sure the new value did not grow the bpv for every other value
  assertTrue(size2 < size1 + (20000 * (63 - 10)) / 8);
  // release resources: the original leaked both the writer and the directory
  iwriter.close();
  dir.close();
}
 
开发者ID:europeana,项目名称:search,代码行数:22,代码来源:BaseCompressingDocValuesFormatTestCase.java


注:本文中的org.apache.lucene.document.NumericDocValuesField类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。