本文整理汇总了Java中org.apache.lucene.index.IndexWriter.commit方法的典型用法代码示例。如果您正苦于以下问题:Java IndexWriter.commit方法的具体用法?Java IndexWriter.commit怎么用?Java IndexWriter.commit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.index.IndexWriter
的用法示例。
在下文中一共展示了IndexWriter.commit方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testRamDirectory
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Benchmarks indexing 10M tiny documents into a {@link RAMDirectory} and then
 * running 10M term queries against it, logging elapsed wall-clock time.
 *
 * @throws IOException if Lucene fails to write or read the in-memory index
 */
public void testRamDirectory() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    RAMDirectory ramDirectory = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(ramDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    long end = System.currentTimeMillis();
    log.error("RamDirectory consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    // FIX: keep a handle on the reader so it (and the directory) can be closed
    // afterwards — the original leaked both.
    DirectoryReader reader = DirectoryReader.open(ramDirectory);
    IndexSearcher indexSearcher = new IndexSearcher(reader);
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RamDirectory search consumes {}ms!", (end - start));
    reader.close();
    ramDirectory.close();
}
示例2: test
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
@Test
public void test() throws Exception {
    // Index a single product document into ./index on the local filesystem.
    Path path = FileSystems.getDefault().getPath("", "index");
    Directory directory = FSDirectory.open(path);
    Analyzer analyzer = new StandardAnalyzer();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(analyzer).setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig);
    Document document = new Document();
    document.add(new LegacyLongField("id", 5499, Field.Store.YES));
    document.add(new Field("title", "小米6", TYPE_STORED));
    document.add(new Field("sellPoint", "骁龙835,6G内存,双摄!", TYPE_STORED));
    // FIX: the original had a dangling "document." statement here, which does
    // not compile; it was removed.
    indexWriter.addDocument(document);
    indexWriter.commit();
    indexWriter.close();
    // FIX: close the directory to release the underlying file handles/lock.
    directory.close();
}
示例3: assertCompressionEquals
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Writes one empty document with the given codec, then reads the resulting
 * segment back and asserts that its stored-fields compression mode attribute
 * matches the expected {@link Mode}.
 */
private void assertCompressionEquals(Mode expected, Codec actual) throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig writerConfig = newIndexWriterConfig(null);
    writerConfig.setCodec(actual);
    IndexWriter writer = new IndexWriter(directory, writerConfig);
    writer.addDocument(new Document());
    writer.commit();
    writer.close();
    DirectoryReader reader = DirectoryReader.open(directory);
    SegmentReader segmentReader = (SegmentReader) reader.leaves().get(0).reader();
    // The codec records its compression mode as a segment attribute.
    String recordedMode = segmentReader.getSegmentInfo().info.getAttribute(Lucene50StoredFieldsFormat.MODE_KEY);
    assertNotNull(recordedMode);
    assertEquals(expected, Mode.valueOf(recordedMode));
    reader.close();
    directory.close();
}
示例4: testCanOpenIndex
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Verifies {@code Store.canOpenIndex}: false before any index exists, true
 * after a commit, and false again once the store is marked corrupted.
 */
public void testCanOpenIndex() throws IOException {
    final ShardId shardId = new ShardId("index", "_na_", 1);
    IndexWriterConfig writerConfig = newIndexWriterConfig();
    Path tempDir = createTempDir();
    final BaseDirectoryWrapper dir = newFSDirectory(tempDir);
    // No index has been written yet, so the directory must not be openable.
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    IndexWriter indexWriter = new IndexWriter(dir, writerConfig);
    Document document = new Document();
    document.add(new StringField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
    indexWriter.addDocument(document);
    indexWriter.commit();
    indexWriter.close();
    // A committed segment exists now, so the index is openable.
    assertTrue(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    DirectoryService directoryService = new DirectoryService(shardId, INDEX_SETTINGS) {
        @Override
        public Directory newDirectory() throws IOException {
            return dir;
        }
    };
    Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
    // Marking the store corrupted must make canOpenIndex fail again.
    store.markStoreCorrupted(new CorruptIndexException("foo", "bar"));
    assertFalse(Store.canOpenIndex(logger, tempDir, shardId, (id, l) -> new DummyShardLock(id)));
    store.close();
}
示例5: commitIndexWriter
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Commits the given writer, attaching the current translog generation/UUID
 * (and optional sync id) as Lucene commit user data so recovery can locate
 * the matching translog. Fails the engine and rethrows on any error.
 *
 * @param writer   the writer to commit
 * @param translog the translog whose generation is recorded in the commit
 * @param syncId   optional synced-flush id; skipped when {@code null}
 * @throws IOException if the commit fails
 */
private void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException {
    try {
        Translog.TranslogGeneration translogGeneration = translog.getGeneration();
        logger.trace("committing writer with translog id [{}] and sync id [{}] ", translogGeneration.translogFileGeneration, syncId);
        Map<String, String> commitData = new HashMap<>(2);
        commitData.put(Translog.TRANSLOG_GENERATION_KEY, Long.toString(translogGeneration.translogFileGeneration));
        commitData.put(Translog.TRANSLOG_UUID_KEY, translogGeneration.translogUUID);
        if (syncId != null) {
            commitData.put(Engine.SYNC_COMMIT_ID, syncId);
        }
        // FIX: the commit data must be set on the writer being committed below;
        // the original called setCommitData on the `indexWriter` field instead
        // of the `writer` parameter.
        writer.setCommitData(commitData);
        writer.commit();
    } catch (Throwable ex) {
        failEngine("lucene commit failed", ex);
        throw ex;
    }
}
示例6: createWriter
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Creates a new {@link IndexWriter} over {@link #directory} and assigns it to
 * {@link #writer}, committing once so the (possibly empty) index exists on
 * disk. If creation fails with an {@link IOException} the error is logged and
 * the previous {@link IndexWriter} is left in place.
 */
private void createWriter() {
    final IndexWriterConfig indexWriterConfig =
            new IndexWriterConfig(IndexManager.LUCENE_VERSION, DEFAULT_ANALYZER);
    try {
        // Clear any stale write lock left behind by a previous run.
        unlock();
        writer = new IndexWriter(directory, indexWriterConfig);
        writer.commit();
    } catch (final IOException e) {
        LOGGER.error("Can't create IndexWriter for path {} because of an exception.",
                location.getAbsolutePath(), e);
    }
}
示例7: testRedisDirectoryWithRemoteJedisPool
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Benchmarks indexing 5M documents into a {@link RedisDirectory} backed by a
 * remote Jedis pool, then querying the same remote index, logging timings.
 *
 * @throws IOException if indexing or searching fails
 */
public void testRedisDirectoryWithRemoteJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "10.97.19.55", 6379, Constants.TIME_OUT);
    RedisDirectory redisDirectory = new RedisDirectory(new JedisPoolStream(jedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 5000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    // FIX: log label said "WithJedisPool"; this test uses the remote pool.
    log.error("RedisDirectoryWithRemoteJedisPool consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    // FIX: the original searched "localhost" although the documents were
    // indexed into the remote Redis at 10.97.19.55 — search the same host.
    RedisDirectory searchDirectory = new RedisDirectory(new JedisStream("10.97.19.55", 6379));
    DirectoryReader reader = DirectoryReader.open(searchDirectory);
    IndexSearcher indexSearcher = new IndexSearcher(reader);
    int total = 0;
    for (int i = 0; i < 1000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithRemoteJedisPool search consumes {}ms!", (end - start));
    // FIX: close the search-side reader and directory (originally leaked).
    reader.close();
    searchDirectory.close();
}
示例8: testRedisDirectoryWithJedisPool
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Benchmarks indexing 10M documents into a {@link RedisDirectory} backed by a
 * local {@link JedisPool}, then querying them back, logging elapsed times.
 *
 * @throws IOException if indexing or searching fails
 */
public void testRedisDirectoryWithJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    // max wait time when borrowing a connection from the pool
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    // 10s connection timeout
    JedisPool jedisPool = new JedisPool(new JedisPoolConfig(), "localhost", 6379, Constants.TIME_OUT);
    RedisDirectory redisDirectory = new RedisDirectory(new JedisPoolStream(jedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    // FIX: keep handles so the search-side reader and directory can be closed
    // afterwards — the original leaked both.
    RedisDirectory searchDirectory = new RedisDirectory(new JedisStream("localhost", 6379));
    DirectoryReader reader = DirectoryReader.open(searchDirectory);
    IndexSearcher indexSearcher = new IndexSearcher(reader);
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedisPool search consumes {}ms!", (end - start));
    reader.close();
    searchDirectory.close();
}
示例9: testRedisDirectoryWithJedis
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Benchmarks indexing 10M documents into a {@link RedisDirectory} over a
 * single (non-pooled) Jedis connection, then querying, logging elapsed times.
 *
 * @throws IOException if indexing or searching fails
 */
public void testRedisDirectoryWithJedis() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer()).setOpenMode(IndexWriterConfig
            .OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    // max wait time when borrowing a connection from the pool
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    // 10s connection timeout
    RedisDirectory redisDirectory = new RedisDirectory(new JedisStream("localhost", 6379));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedis consumes {}s!", (end - start) / 1000);
    start = System.currentTimeMillis();
    // FIX: keep handles so the search-side reader and directory can be closed
    // afterwards — the original leaked both.
    RedisDirectory searchDirectory = new RedisDirectory(new JedisStream("localhost", 6379));
    DirectoryReader reader = DirectoryReader.open(searchDirectory);
    IndexSearcher indexSearcher = new IndexSearcher(reader);
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithJedis search consumes {}ms!", (end - start));
    reader.close();
    searchDirectory.close();
}
示例10: initSearcher
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Test fixture: builds a one-document index (fields {@code FIELD} and
 * {@code _uid}) and opens a near-real-time reader/searcher over it.
 */
@Before
public void initSearcher() throws IOException {
    dir = newDirectory();
    w = new IndexWriter(dir, newIndexWriterConfig(new StandardAnalyzer()));
    Document doc = new Document();
    doc.add(new TextField(FIELD, TEXT, Field.Store.YES));
    doc.add(new TextField("_uid", "1", Field.Store.YES));
    w.addDocument(doc);
    w.commit();
    // Open the reader directly from the writer (near-real-time view).
    reader = DirectoryReader.open(w);
    searcher = newSearcher(reader);
}
示例11: testLoadGlobal_neverCacheIfFieldIsMissing
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
// Verifies that global ordinals are cached once per field, and that a reader
// in which the field is masked (missing) does NOT trigger a second global
// cache entry. Exercises both SortedSetDVOrdinals and PagedBytes field data.
public void testLoadGlobal_neverCacheIfFieldIsMissing() throws Exception {
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(null);
// NoMergePolicy keeps the periodic commits below as separate segments, so a
// multi-segment (global) ordinal structure is actually built.
iwc.setMergePolicy(NoMergePolicy.INSTANCE);
IndexWriter iw = new IndexWriter(dir, iwc);
long numDocs = scaledRandomIntBetween(32, 128);
for (int i = 1; i <= numDocs; i++) {
Document doc = new Document();
doc.add(new SortedSetDocValuesField("field1", new BytesRef(String.valueOf(i))));
doc.add(new StringField("field2", String.valueOf(i), Field.Store.NO));
iw.addDocument(doc);
// Commit every 24 docs to force multiple segments.
if (i % 24 == 0) {
iw.commit();
}
}
iw.close();
DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId("_index", "_na_", 0));
DummyAccountingFieldDataCache fieldDataCache = new DummyAccountingFieldDataCache();
// Testing SortedSetDVOrdinalsIndexFieldData:
SortedSetDVOrdinalsIndexFieldData sortedSetDVOrdinalsIndexFieldData = createSortedDV("field1", fieldDataCache);
sortedSetDVOrdinalsIndexFieldData.loadGlobal(ir);
assertThat(fieldDataCache.cachedGlobally, equalTo(1));
// Loading with field1 masked out must NOT add another global cache entry.
sortedSetDVOrdinalsIndexFieldData.loadGlobal(new FieldMaskingReader("field1", ir));
assertThat(fieldDataCache.cachedGlobally, equalTo(1));
// Testing PagedBytesIndexFieldData
PagedBytesIndexFieldData pagedBytesIndexFieldData = createPagedBytes("field2", fieldDataCache);
pagedBytesIndexFieldData.loadGlobal(ir);
assertThat(fieldDataCache.cachedGlobally, equalTo(2));
// Same check for the paged-bytes implementation with field2 masked.
pagedBytesIndexFieldData.loadGlobal(new FieldMaskingReader("field2", ir));
assertThat(fieldDataCache.cachedGlobally, equalTo(2));
ir.close();
dir.close();
}
示例12: deleteIndex
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Deletes all indexed documents belonging to the given blog user and forces
 * the deletions to be merged away before committing.
 *
 * @param userid id of the user whose documents are removed
 * @throws Exception if the index cannot be updated
 */
public void deleteIndex(String userid) throws Exception {
    IndexWriter writer = getWriter();
    // FIX: close the writer in a finally block so it is released even when
    // deleteDocuments/forceMergeDeletes/commit throws (originally leaked).
    try {
        writer.deleteDocuments(new Term("userid", userid));
        writer.forceMergeDeletes(); // expunge deleted docs immediately
        writer.commit();
    } finally {
        writer.close();
    }
}
示例13: writeEmptyTermVector
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Builds a one-document index whose field stores term vectors but contains no
 * indexed text, reads the (empty) vectors back, and populates the given
 * response with them, marking the document as existing.
 */
private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException {
    Directory directory = newDirectory();
    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setOpenMode(OpenMode.CREATE);
    IndexWriter indexWriter = new IndexWriter(directory, config);
    // Field type that records vectors with positions and offsets (no payloads).
    FieldType vectorType = new FieldType(TextField.TYPE_STORED);
    vectorType.setStoreTermVectorOffsets(true);
    vectorType.setStoreTermVectorPayloads(false);
    vectorType.setStoreTermVectorPositions(true);
    vectorType.setStoreTermVectors(true);
    vectorType.freeze();
    Document doc = new Document();
    doc.add(new Field("id", "abc", StringField.TYPE_STORED));
    indexWriter.updateDocument(new Term("id", "abc"), doc);
    indexWriter.commit();
    indexWriter.close();
    DirectoryReader reader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(reader);
    TopDocs hits = searcher.search(new TermQuery(new Term("id", "abc")), 1);
    int docId = hits.scoreDocs[0].doc;
    Fields termVectors = reader.getTermVectors(docId);
    EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
    outResponse.setFields(termVectors, null, flags, termVectors);
    outResponse.setExists(true);
    reader.close();
    directory.close();
}
示例14: indexSomeData
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Adds three sample book records to the index and commits them in one batch.
 *
 * @param writer the {@link IndexWriter} the documents are written through.
 * @throws IOException in case of I/O failure while indexing or committing.
 */
public static void indexSomeData(IndexWriter writer) throws IOException {
    writer.addDocument(newBook("1", "Apache Solr Essentials", "Andrea Gazzarini"));
    writer.addDocument(newBook("2", "Apache Solr FullText Search Server", "John White"));
    writer.addDocument(newBook("3", "Enterprise Search with Apache Solr", "Martin Green"));
    // A single commit makes all three documents visible atomically.
    writer.commit();
}
示例15: setUp
import org.apache.lucene.index.IndexWriter; //导入方法依赖的package包/类
/**
 * Prepares the fixture for this test case: opens an on-disk index under
 * {@code /tmp}, creates an {@link IndexWriter} over it, and commits three
 * sample book documents.
 *
 * @throws IOException hopefully never, otherwise the test will fail.
 */
@Before
public void setUp() throws IOException {
    // Directory reference: where the index datafiles live.
    directory = FSDirectory.open(new File("/tmp").toPath());
    // Writer with default configuration.
    writer = new IndexWriter(directory, new IndexWriterConfig());
    // Seed the index with sample data and make it visible.
    writer.addDocument(newBook("1", "Apache Solr Essentials", "Andrea Gazzarini"));
    writer.addDocument(newBook("2", "Apache Solr FullText Search Server", "John White"));
    writer.addDocument(newBook("3", "Enterprise Search with Apache Solr", "Martin Green"));
    writer.commit();
}