

Java FieldType.freeze Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.document.FieldType.freeze, gathered from open-source projects. If you are wondering what FieldType.freeze does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of org.apache.lucene.document.FieldType, the class this method belongs to.


Fifteen code examples of the FieldType.freeze method are shown below, ordered by popularity by default. A minimal usage sketch precedes them.
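The following sketch was written for this article and is not taken from any of the projects below; it assumes a Lucene 5.x+ API, and the class name, field names, and values are illustrative only. It shows the typical pattern: configure a FieldType, call freeze() to make the configuration immutable, then reuse the frozen type across fields and documents. After freeze(), any further setter call throws IllegalStateException.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexOptions;

public class FieldTypeFreezeSketch {
    // Configure the type once, then freeze it so it can no longer be mutated.
    private static final FieldType TEXT_WITH_VECTORS = new FieldType();
    static {
        TEXT_WITH_VECTORS.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
        TEXT_WITH_VECTORS.setTokenized(true);
        TEXT_WITH_VECTORS.setStored(true);
        TEXT_WITH_VECTORS.setStoreTermVectors(true);
        TEXT_WITH_VECTORS.freeze(); // further setter calls now throw IllegalStateException
    }

    public static Document newDocument(String title, String body) {
        Document doc = new Document();
        // A frozen FieldType can safely be shared across fields and documents.
        doc.add(new Field("title", title, TEXT_WITH_VECTORS));
        doc.add(new Field("body", body, TEXT_WITH_VECTORS));
        return doc;
    }
}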

Example 1: LuceneUtil

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
public LuceneUtil(JasperReportsContext jasperReportsContext, boolean isCaseSensitive, boolean isWholeWordsOnly, boolean removeAccents) {
	this.isCaseSensitive = isCaseSensitive;
	this.isWholeWordsOnly = isWholeWordsOnly;
	this.removeAccents = removeAccents;

	this.noneSelector = JRStyledTextAttributeSelector.getNoneSelector(jasperReportsContext);
	this.styledTextUtil = JRStyledTextUtil.getInstance(jasperReportsContext);
	
	fieldType = new FieldType();
	fieldType.setIndexed(true);
	fieldType.setTokenized(true);
	fieldType.setStored(true);
	fieldType.setStoreTermVectors(true);
	fieldType.setStoreTermVectorPositions(true);
	fieldType.setStoreTermVectorOffsets(true);
	fieldType.freeze();
}
 
Developer: TIBCOSoftware, Project: jasperreports, Lines: 18, Source: LuceneUtil.java

Example 2: testStats

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
/** test that when freqs are omitted, totalTermFreq and sumTotalTermFreq are -1 */
public void testStats() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir,
      newIndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setIndexOptions(IndexOptions.DOCS_ONLY);
  ft.freeze();
  Field f = newField("foo", "bar", ft);
  doc.add(f);
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  assertEquals(-1, ir.totalTermFreq(new Term("foo", new BytesRef("bar"))));
  assertEquals(-1, ir.getSumTotalTermFreq("foo"));
  ir.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 20, Source: TestOmitTf.java

Example 3: addRandomFields

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
@Override
protected void addRandomFields(Document doc) {
  for (IndexOptions opts : IndexOptions.values()) {
    final String field = "f_" + opts;
    String pf = TestUtil.getPostingsFormat(Codec.getDefault(), field);
    if (opts == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS && doesntSupportOffsets.contains(pf)) {
      continue;
    }
    FieldType ft = new FieldType();
    ft.setIndexOptions(opts);
    ft.setIndexed(true);
    ft.setOmitNorms(true);
    ft.freeze();
    final int numFields = random().nextInt(5);
    for (int j = 0; j < numFields; ++j) {
      doc.add(new Field("f_" + opts, TestUtil.randomSimpleString(random(), 2), ft));
    }
  }
}
 
Developer: europeana, Project: search, Lines: 20, Source: BasePostingsFormatTestCase.java

Example 4: testExcIndexingDocBeforeDocValues

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
public void testExcIndexingDocBeforeDocValues() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  IndexWriter w = new IndexWriter(dir, iwc);
  Document doc = new Document();
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setDocValueType(DocValuesType.SORTED);
  ft.freeze();
  Field field = new Field("test", "value", ft);
  field.setTokenStream(new TokenStream() {
      @Override
      public boolean incrementToken() {
        throw new RuntimeException("no");
      }
    });
  doc.add(field);
  try {
    w.addDocument(doc);
    fail("did not hit exception");
  } catch (RuntimeException re) {
    // expected
  }
  w.addDocument(new Document());
  w.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 27, Source: TestDocValuesIndexing.java

Example 5: testNoNorms

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
/** make sure we can retrieve when norms are disabled */
public void testNoNorms() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setOmitNorms(true);
  ft.freeze();
  doc.add(newField("foo", "bar", ft));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery query = new BooleanQuery(true);
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query, 10).totalHits);
  }
  ir.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 24, Source: TestSimilarity2.java

Example 6: testOmitTF

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
/** make sure all sims work if TF is omitted */
public void testOmitTF() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setIndexOptions(IndexOptions.DOCS_ONLY);
  ft.freeze();
  Field f = newField("foo", "bar", ft);
  doc.add(f);
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery query = new BooleanQuery(true);
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query, 10).totalHits);
  }
  ir.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 25, Source: TestSimilarity2.java

Example 7: testNoAbortOnBadTVSettings

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
public void testNoAbortOnBadTVSettings() throws Exception {
  Directory dir = newDirectory();
  // Don't use RandomIndexWriter because we want to be sure both docs go to 1 seg:
  IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
  IndexWriter iw = new IndexWriter(dir, iwc);

  Document doc = new Document();
  iw.addDocument(doc);
  FieldType ft = new FieldType(StoredField.TYPE);
  ft.setStoreTermVectors(true);
  ft.freeze();
  doc.add(new Field("field", "value", ft));
  try {
    iw.addDocument(doc);
    fail("should have hit exc");
  } catch (IllegalArgumentException iae) {
    // expected
  }
  IndexReader r = DirectoryReader.open(iw, true);

  // Make sure the exc didn't lose our first document:
  assertEquals(1, r.numDocs());
  iw.close();
  r.close();
  dir.close();
}
 
Developer: europeana, Project: search, Lines: 27, Source: TestTermVectorsWriter.java

Example 8: getAllFieldType

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private FieldType getAllFieldType() {
    FieldType ft = new FieldType();
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS);
    ft.setTokenized(true);
    ft.freeze();
    return ft;
}
 
Developer: justor, Project: elasticsearch_my, Lines: 8, Source: SimpleAllTests.java

Example 9: writeEmptyTermVector

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));

    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields fields = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(fields, null, flags, fields);
    outResponse.setExists(true);
    dr.close();
    dir.close();
}
 
Developer: justor, Project: elasticsearch_my, Lines: 32, Source: TermVectorsUnitTests.java

Example 10: writeStandardTermVector

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("title", "the1 quick brown fox jumps over  the1 lazy dog", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over  the1 lazy dog", type));

    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    dr.close();
    dir.close();
}
 
Developer: justor, Project: elasticsearch_my, Lines: 34, Source: TermVectorsUnitTests.java

Example 11: getType

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private static FieldType getType(int numDims) {
    FieldType type = new FieldType();
    type.setDimensions(numDims, Float.BYTES);
    type.setDocValuesType(DocValuesType.NUMERIC);
    type.freeze();
    return type;
}
 
Developer: react-dev26, Project: NGB-master, Lines: 8, Source: SortedFloatPoint.java

Example 12: getType

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private static FieldType getType(int numDims) {
    FieldType type = new FieldType();
    type.setDocValuesType(DocValuesType.NUMERIC);
    type.setDimensions(numDims, Integer.BYTES);
    type.freeze();
    return type;
}
 
Developer: react-dev26, Project: NGB-master, Lines: 8, Source: SortedIntPoint.java

Example 13: createFieldType

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private FieldType createFieldType() {
	FieldType ft = new FieldType();
	ft.setIndexed(true);
	ft.setTokenized(true);
	ft.setOmitNorms(true);
	ft.freeze();
	return ft;
}
 
Developer: arne-cl, Project: fangorn, Lines: 9, Source: CreateIndex.java

Example 14: getFieldType

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private static FieldType getFieldType() {
	FieldType ft = new FieldType();
	ft.setIndexed(true);
	ft.setTokenized(true);
	ft.setOmitNorms(true);
	ft.freeze();
	return ft;
}
 
Developer: arne-cl, Project: fangorn, Lines: 9, Source: IndexTestCase.java

Example 15: writeStandardTermVector

import org.apache.lucene.document.FieldType; // import the package/class this method depends on
private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory dir = LuceneTestCase.newDirectory();
    IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
    conf.setOpenMode(OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(dir, conf);
    FieldType type = new FieldType(TextField.TYPE_STORED);
    type.setStoreTermVectorOffsets(true);
    type.setStoreTermVectorPayloads(false);
    type.setStoreTermVectorPositions(true);
    type.setStoreTermVectors(true);
    type.freeze();
    Document d = new Document();
    d.add(new Field("id", "abc", StringField.TYPE_STORED));
    d.add(new Field("plaintext", "the1 quick brown fox jumps over  the1 lazy dog comment", type));
    d.add(new Field("desc", "the1 quick brown fox jumps over  the1 lazy dog comment", type));

    writer.updateDocument(new Term("id", "abc"), d);
    writer.commit();
    writer.close();
    DirectoryReader dr = DirectoryReader.open(dir);
    IndexSearcher s = new IndexSearcher(dr);
    TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
    ScoreDoc[] scoreDocs = search.scoreDocs;
    int doc = scoreDocs[0].doc;
    Fields termVectors = dr.getTermVectors(doc);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    dr.close();
    dir.close();
}
 
Developer: dlcs, Project: the-mathmos-server, Lines: 34, Source: TextSearchServiceImplTest.java


Note: The org.apache.lucene.document.FieldType.freeze examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.