

Java RAMDirectory.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.lucene.store.RAMDirectory.close. If you are wondering what RAMDirectory.close does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples for the enclosing class, org.apache.lucene.store.RAMDirectory.


Six code examples of the RAMDirectory.close method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help surface better Java code examples.
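Before the collected examples, here is a minimal, self-contained sketch of the RAMDirectory lifecycle that all of them follow: create the in-memory directory, write and read a file through the Directory API, and finally release the heap buffers it holds with close(). The file name payload.bin and the integer payload are illustrative only.

import java.io.IOException;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

public class RAMDirectoryLifecycle {
  public static void main(String[] args) throws IOException {
    RAMDirectory directory = new RAMDirectory();
    try {
      // Write a small file into the in-memory directory.
      IndexOutput out = directory.createOutput("payload.bin", IOContext.DEFAULT);
      out.writeInt(42);
      out.close();

      // Read it back through the same Directory API.
      IndexInput in = directory.openInput("payload.bin", IOContext.DEFAULT);
      System.out.println(in.readInt()); // prints 42
      in.close();
    } finally {
      // close() releases the buffers RAMDirectory keeps on the heap.
      directory.close();
    }
  }
}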

Example 1: testReadWrite

import org.apache.lucene.store.RAMDirectory; // import the package/class this method depends on
@Test
public void testReadWrite() {
  Vector v1 = new ComplexVector(new short[] { -1, 8000, 16000 });
  RAMDirectory directory = new RAMDirectory();
  try {
    IndexOutput indexOutput = directory.createOutput("complexvectors.bin", IOContext.DEFAULT);
    v1.writeToLuceneStream(indexOutput);
    indexOutput.close();

    IndexInput indexInput = directory.openInput("complexvectors.bin", IOContext.DEFAULT);
    ComplexVector cv2 = new ComplexVector(3, Mode.POLAR_SPARSE);
    cv2.readFromLuceneStream(indexInput);
    assertFloatArrayEquals(
        new float[] {0, 0, -0.997290f, 0.073564f, 0.989176f, -0.1467304f},
        cv2.getCoordinates(), TOL);
  } catch (IOException e) {
    e.printStackTrace();
    fail();
  }
  directory.close();
}
 
Developer: semanticvectors, Project: semanticvectors, Lines of code: 22, Source: ComplexVectorTest.java
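Since Lucene's Directory implements Closeable, a try-with-resources block is a more idiomatic way to guarantee that close() runs even when an assertion or I/O error escapes the method. A sketch of Example 1 rewritten that way, assuming the same test class context (ComplexVector and JUnit's fail come from the semanticvectors test above):

import org.apache.lucene.store.RAMDirectory;

@Test
public void testReadWriteAutoClose() {
  Vector v1 = new ComplexVector(new short[] { -1, 8000, 16000 });
  // directory.close() is invoked automatically when the block exits.
  try (RAMDirectory directory = new RAMDirectory()) {
    IndexOutput indexOutput = directory.createOutput("complexvectors.bin", IOContext.DEFAULT);
    v1.writeToLuceneStream(indexOutput);
    indexOutput.close();
    // ... read back and assert exactly as in Example 1 ...
  } catch (IOException e) {
    fail(e.getMessage());
  }
}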

Example 2: testGenerateRandomVectorWriteAndRead

import org.apache.lucene.store.RAMDirectory; // import the package/class this method depends on
@Test
public void testGenerateRandomVectorWriteAndRead() {
  Random random = new Random(0);

  Vector vector = VectorFactory.generateRandomVector(VectorType.BINARY, 64, 2, random);
  assertEquals("1100001111010001111111011010000000111011100001100010010010001111", vector.writeToString());

  RAMDirectory directory = new RAMDirectory();
  try {
    IndexOutput indexOutput = directory.createOutput("binaryvectors.bin", IOContext.DEFAULT);
    vector.writeToLuceneStream(indexOutput);
    indexOutput.close();
    IndexInput indexInput = directory.openInput("binaryvectors.bin", IOContext.DEFAULT);
    Vector vector2 = VectorFactory.createZeroVector(VectorType.BINARY, 64);
    assertEquals("0000000000000000000000000000000000000000000000000000000000000000", vector2.writeToString());
    vector2.readFromLuceneStream(indexInput);
    assertEquals("1100001111010001111111011010000000111011100001100010010010001111", vector2.writeToString());
  } catch (IOException e) {
    e.printStackTrace();
    fail();
  }
  directory.close();
}
 
Developer: semanticvectors, Project: semanticvectors, Lines of code: 24, Source: BinaryVectorTest.java

Example 3: testSave

import org.apache.lucene.store.RAMDirectory; // import the package/class this method depends on
public void testSave() throws IOException {
  final int valueCount = TestUtil.nextInt(random(), 1, 2048);
  for (int bpv = 1; bpv <= 64; ++bpv) {
    final int maxValue = (int) Math.min(PackedInts.maxValue(31), PackedInts.maxValue(bpv));
    final RAMDirectory directory = new RAMDirectory();
    List<PackedInts.Mutable> packedInts = createPackedInts(valueCount, bpv);
    for (PackedInts.Mutable mutable : packedInts) {
      for (int i = 0; i < mutable.size(); ++i) {
        mutable.set(i, random().nextInt(maxValue));
      }

      IndexOutput out = directory.createOutput("packed-ints.bin", IOContext.DEFAULT);
      mutable.save(out);
      out.close();

      IndexInput in = directory.openInput("packed-ints.bin", IOContext.DEFAULT);
      PackedInts.Reader reader = PackedInts.getReader(in);
      assertEquals(valueCount, reader.size());
      if (mutable instanceof Packed64SingleBlock) {
        // make sure that we used the right format so that the reader has
        // the same performance characteristics as the mutable that has been
        // serialized
        assertTrue(reader instanceof Packed64SingleBlock);
      } else {
        assertFalse(reader instanceof Packed64SingleBlock);
      }
      for (int i = 0; i < valueCount; ++i) {
        assertEquals(mutable.get(i), reader.get(i));
      }
      in.close();
      directory.deleteFile("packed-ints.bin");
    }
    directory.close();
  }
}
 
Developer: europeana, Project: search, Lines of code: 36, Source: TestPackedInts.java

Example 4: close

import org.apache.lucene.store.RAMDirectory; // import the package/class this method depends on
@Override
public synchronized void close()
{
    for ( RAMDirectory ramDirectory : directories.values() )
    {
        ramDirectory.close();
    }
    directories.clear();
}
 
Developer: neo4j-contrib, Project: neo4j-lucene5-index, Lines of code: 10, Source: DirectoryFactory.java
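When many directories must be released at once, as in Example 4, Lucene's org.apache.lucene.util.IOUtils.close helper is a common alternative: it attempts to close every Closeable it is given and rethrows the first IOException afterwards. A sketch of the same method using it, assuming the directories map field from Example 4 (the RuntimeException wrapper is only there to keep the non-throwing signature):

import java.io.IOException;
import org.apache.lucene.util.IOUtils;

@Override
public synchronized void close()
{
    try
    {
        // Tries to close every directory, even if an earlier close() throws.
        IOUtils.close( directories.values() );
    }
    catch ( IOException e )
    {
        throw new RuntimeException( e );
    }
    directories.clear();
}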

Example 5: testWickedLongTerm

import org.apache.lucene.store.RAMDirectory; // import the package/class this method depends on
/**
 * Make sure we skip wicked long terms.
 */
public void testWickedLongTerm() throws IOException {
  RAMDirectory dir = new RAMDirectory();
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new ClassicAnalyzer()));

  char[] chars = new char[IndexWriter.MAX_TERM_LENGTH];
  Arrays.fill(chars, 'x');
  Document doc = new Document();
  final String bigTerm = new String(chars);

  // This produces a too-long term:
  String contents = "abc xyz x" + bigTerm + " another term";
  doc.add(new TextField("content", contents, Field.Store.NO));
  writer.addDocument(doc);

  // Make sure we can add another normal document
  doc = new Document();
  doc.add(new TextField("content", "abc bbb ccc", Field.Store.NO));
  writer.addDocument(doc);
  writer.close();

  IndexReader reader = IndexReader.open(dir);

  // Make sure all terms < max size were indexed
  assertEquals(2, reader.docFreq(new Term("content", "abc")));
  assertEquals(1, reader.docFreq(new Term("content", "bbb")));
  assertEquals(1, reader.docFreq(new Term("content", "term")));
  assertEquals(1, reader.docFreq(new Term("content", "another")));

  // Make sure position is still incremented when
  // massive term is skipped:
  DocsAndPositionsEnum tps = MultiFields.getTermPositionsEnum(reader,
                                                              MultiFields.getLiveDocs(reader),
                                                              "content",
                                                              new BytesRef("another"));
  assertTrue(tps.nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertEquals(1, tps.freq());
  assertEquals(3, tps.nextPosition());

  // Make sure the doc that has the massive term is in
  // the index:
  assertEquals("document with wicked long term should is not in the index!", 2, reader.numDocs());

  reader.close();

  // Make sure we can add a document with exactly the
  // maximum length term, and search on that term:
  doc = new Document();
  doc.add(new TextField("content", bigTerm, Field.Store.NO));
  ClassicAnalyzer sa = new ClassicAnalyzer();
  sa.setMaxTokenLength(100000);
  writer  = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, sa));
  writer.addDocument(doc);
  writer.close();
  reader = IndexReader.open(dir);
  assertEquals(1, reader.docFreq(new Term("content", bigTerm)));
  reader.close();

  dir.close();
}
 
Developer: europeana, Project: search, Lines of code: 63, Source: TestClassicAnalyzer.java

Example 6: getCosineSimilarityMatrix

import org.apache.lucene.store.RAMDirectory; // import the package/class this method depends on
public static DocVector[] getCosineSimilarityMatrix(List<String> fileSentences) throws IOException {

	RAMDirectory ramDir = new RAMDirectory();
	FileReader fr = new FileReader(new File("lib/stoplists/en.txt"));
	Analyzer analyzer = new StopAnalyzer(Version.LUCENE_36, fr);

	// Index the full text of every sentence as its own document.
	IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_36, analyzer));
	for (String s : fileSentences) {
		Document doc = new Document();
		doc.add(new Field("contents", new StringReader(s), TermVector.YES));
		writer.addDocument(doc);
	}
	writer.close();
	fr.close();

	DocVector[] docs = new DocVector[fileSentences.size()];

	// Build the global term dictionary: term text -> position in the vector.
	// terms(Term) positions the enum AT the first matching term, so iterate
	// with do/while to avoid skipping it.
	IndexReader RAMreader = IndexReader.open(ramDir);
	Map<String, Integer> terms = new HashMap<String, Integer>();
	TermEnum termEnum = RAMreader.terms(new Term("contents"));
	int pos = 0;
	do {
		Term term = termEnum.term();
		if (term == null || !"contents".equals(term.field()))
			break;
		terms.put(term.text(), pos++);
	} while (termEnum.next());
	termEnum.close();

	// Build a tf-idf weighted, normalized term vector for each document.
	for (int i = 0; i < fileSentences.size(); i++) {
		TermFreqVector[] tfvs = RAMreader.getTermFreqVectors(i);
		docs[i] = new DocVector(terms);
		if (tfvs == null)
			continue;
		for (TermFreqVector tfv : tfvs) {
			String[] termTexts = tfv.getTerms();
			int[] termFreqs = tfv.getTermFrequencies();
			for (int j = 0; j < termTexts.length; j++) {
				double tfIdfValue = termFreqs[j] * getIDF(RAMreader, termTexts[j]);
				docs[i].setEntry(termTexts[j], tfIdfValue);
			}
		}
		docs[i].normalize();
	}

	RAMreader.close();
	ramDir.close();
	return docs;
}
 
Developer: siddBanPsu, Project: WikiKreator, Lines of code: 71, Source: CosineDocumentSimilarity.java
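A final note for readers on current Lucene: RAMDirectory was deprecated in the 8.x line and removed in Lucene 9. In newer code the usual in-memory replacement is org.apache.lucene.store.ByteBuffersDirectory, which honors the same Directory and close() contract, so the examples above translate almost verbatim. A minimal sketch:

import java.io.IOException;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

// Same lifecycle as the RAMDirectory examples above, on Lucene 9+.
try (Directory directory = new ByteBuffersDirectory()) {
  // createOutput/openInput work exactly as in the examples above.
} catch (IOException e) {
  throw new RuntimeException(e);
}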


Note: The org.apache.lucene.store.RAMDirectory.close method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Before distributing or using the code, consult the corresponding project's License; do not republish without permission.