

Java NativeFSLockFactory Class Code Examples

This article collects typical usage examples of the Java class org.apache.lucene.store.NativeFSLockFactory. If you are wondering what NativeFSLockFactory is for, how to use it, or where to find it used in real code, the curated class examples below should help.


The NativeFSLockFactory class belongs to the org.apache.lucene.store package. Ten code examples of the class are shown below, ordered by popularity.
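Before the project-specific examples, here is a minimal, self-contained sketch of the class in action, written against the Lucene 5+ API where the factory is exposed as the singleton NativeFSLockFactory.INSTANCE (older releases, used by most examples below, construct it directly, e.g. new NativeFSLockFactory(lockDir)). The index path is a placeholder.

import java.nio.file.Paths;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.NativeFSLockFactory;

public class NativeFSLockDemo {
  public static void main(String[] args) throws Exception {
    // Open a filesystem directory whose locks are backed by OS-level file locks.
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/demo-index"), // placeholder path
                                          NativeFSLockFactory.INSTANCE);
         // "write.lock" is the same lock name IndexWriter uses for its write lock.
         Lock writeLock = dir.obtainLock("write.lock")) {
      // While this block runs, no other process (or thread in this JVM) can obtain
      // "write.lock" on the same directory; attempts throw LockObtainFailedException.
      System.out.println("Acquired " + writeLock);
    } // closing the Lock releases the native file lock; closing dir releases the Directory
  }
}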

Example 1: testNativeLockErrorOnStartup

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
@Test
public void testNativeLockErrorOnStartup() throws Exception {

  Directory directory = newFSDirectory(new File(dataDir, "index"), new NativeFSLockFactory());
  //creates a new IndexWriter without releasing the lock yet
  IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_40, null));

  try {
    //opening a new core on the same index
    initCore("solrconfig-nativelock.xml", "schema.xml");
    fail("Expected " + LockObtainFailedException.class.getSimpleName());
  } catch(Throwable t) {
    assertTrue(t instanceof RuntimeException);
    assertNotNull(t.getCause());
    assertTrue(t.getCause() instanceof RuntimeException);
    assertNotNull(t.getCause().getCause());
    assertTrue(t.getCause().getCause() instanceof LockObtainFailedException);
  } finally {
    indexWriter.close();
    directory.close();
    deleteCore();
  }
}
 
Developer: pkarmstr, Project: NYBC, Lines: 24, Source: SolrCoreCheckLockOnStartupTest.java
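The test above leans on Solr's test harness (initCore, deleteCore, newFSDirectory); what it actually exercises is that NativeFSLockFactory prevents a second IndexWriter from opening an index whose write lock is already held. A stripped-down sketch of that conflict against the Lucene 5+ API (the index path and class name are illustrative) might look like this; as in the test, a null analyzer is enough because no documents are added.

import java.nio.file.Paths;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;

public class NativeLockConflictDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/lock-conflict-demo"), // placeholder path
                                          NativeFSLockFactory.INSTANCE);
         // The first writer acquires write.lock and holds it until it is closed.
         IndexWriter first = new IndexWriter(dir, new IndexWriterConfig(null))) {
      try (IndexWriter second = new IndexWriter(dir, new IndexWriterConfig(null))) {
        System.out.println("Unexpected: second writer obtained the lock");
      } catch (LockObtainFailedException expected) {
        // This is the failure the Solr test asserts on (wrapped in RuntimeExceptions there).
        System.out.println("Second writer rejected: " + expected.getMessage());
      }
    }
  }
}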

Example 2: openStreams

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
/**
 * Opens all the needed streams that the engine needs to work properly.
 * 
 * @throws IndexException
 */
private void openStreams() throws IndexException {
	try {
		if (_nativeLocking) {
			_storage = FSDirectory.open(new File(INDEX_DIR), new NativeFSLockFactory(INDEX_DIR));
		} else {
			_storage = FSDirectory.open(new File(INDEX_DIR));
		}

		IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_32, ANALYZER);
		conf.setMaxBufferedDocs(_maxDocsBuffer);
		conf.setRAMBufferSizeMB(_maxRAMBufferSize);

		_iWriter = new IndexWriter(_storage, conf);
	} catch (IOException e) {
		closeAll();

		throw new IndexException("Unable to initialize the index", e);
	}
}
 
Developer: f4bio, Project: drftpd3-extended, Lines: 25, Source: LuceneEngine.java

Example 3: LogSearch

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
public LogSearch(File file) throws Exception
{
    this.file = file;
    directory = new NIOFSDirectory(file, new NativeFSLockFactory());
    reader = IndexReader.open(directory);
    searcher = new IndexSearcher(reader);
}
 
Developer: dcos, Project: exhibitor, Lines: 8, Source: LogSearch.java

Example 4: testNativeLockErrorOnStartup

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
@Test
public void testNativeLockErrorOnStartup() throws Exception {

  File indexDir = new File(initCoreDataDir, "index");
  log.info("Acquiring lock on {}", indexDir.getAbsolutePath());
  Directory directory = newFSDirectory(indexDir, new NativeFSLockFactory());
  //creates a new IndexWriter without releasing the lock yet
  IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, null));

  ignoreException("locked");
  try {
    System.setProperty("solr.tests.lockType","native");
    //opening a new core on the same index
    initCore("solrconfig-basic.xml", "schema.xml");
    CoreContainer cc = h.getCoreContainer();
    if (checkForCoreInitException(LockObtainFailedException.class))
      return;
    fail("Expected " + LockObtainFailedException.class.getSimpleName());
  } finally {
    System.clearProperty("solr.tests.lockType");
    unIgnoreException("locked");
    indexWriter.close();
    directory.close();
    deleteCore();
  }
}
 
Developer: europeana, Project: search, Lines: 27, Source: SolrCoreCheckLockOnStartupTest.java

Example 5: newDirectory

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
/**
 * Gets the Lucene Directory that has been configured to store the index.
 * @return the directory
 * @throws IOException if an exception occurs
 */
private Directory newDirectory() throws IOException {
  File fDir = new File(this.luceneConfig.getIndexLocation());
  NativeFSLockFactory nativeLockFactory = this.getNativeLockFactory();
  if (nativeLockFactory != null) {
    return FSDirectory.open(fDir,nativeLockFactory);
  } else {
    return FSDirectory.open(fDir);
  }
}
 
Developer: GeoinformationSystems, Project: GeoprocessingAppstore, Lines: 15, Source: LuceneIndexAdapter.java

Example 6: getDirectory

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
/**
 * Gets the Lucene Directory associated with the index.
 * @return the directory
 * @throws IOException if an I/O exception occurs
 */
protected Directory getDirectory() throws IOException {
  File fDir = new File(this.getIndexReference().getIndexLocation());
  NativeFSLockFactory nativeLockFactory = this.getNativeLockFactory();
  if (nativeLockFactory != null) {
    return FSDirectory.open(fDir,nativeLockFactory);
  } else {
    return FSDirectory.open(fDir);
  }
}
 
Developer: GeoinformationSystems, Project: GeoprocessingAppstore, Lines: 15, Source: AsnBaseIndexAdapter.java

Example 7: injectLockFactory

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
private static Directory injectLockFactory(Directory dir, String lockPath,
    String rawLockType) throws IOException {
  if (null == rawLockType) {
    // we default to "simple" for backwards compatibility
    log.warn("No lockType configured for " + dir + " assuming 'simple'");
    rawLockType = "simple";
  }
  final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
  
  if ("simple".equals(lockType)) {
    // multiple SimpleFSLockFactory instances should be OK
    dir.setLockFactory(new SimpleFSLockFactory(lockPath));
  } else if ("native".equals(lockType)) {
    dir.setLockFactory(new NativeFSLockFactory(lockPath));
  } else if ("single".equals(lockType)) {
    if (!(dir.getLockFactory() instanceof SingleInstanceLockFactory)) dir
        .setLockFactory(new SingleInstanceLockFactory());
  } else if ("none".equals(lockType)) {
    // Recipe for disaster
    log.error("CONFIGURATION WARNING: locks are disabled on " + dir);
    dir.setLockFactory(NoLockFactory.getNoLockFactory());
  } else {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        "Unrecognized lockType: " + rawLockType);
  }
  return dir;
}
 
Developer: pkarmstr, Project: NYBC, Lines: 28, Source: CachingDirectoryFactory.java
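Note that Directory.setLockFactory, which injectLockFactory relies on, only exists in Lucene 4.x and earlier; from Lucene 5 onward the lock factory is fixed when the Directory is created. A rough sketch of the same lockType switch against the newer API (the class and method names are illustrative, and Solr-specific branches such as HDFS are omitted) could look like this:

import java.io.IOException;
import java.nio.file.Path;
import java.util.Locale;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockFactory;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.NoLockFactory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SingleInstanceLockFactory;

final class LockTypeChooser { // illustrative helper, not part of Solr
  // Mirrors the lockType mapping above, but for Lucene 5+ where the factory
  // must be supplied when the Directory is opened rather than injected afterwards.
  static Directory openWithLockType(Path indexPath, String rawLockType) throws IOException {
    String lockType = (rawLockType == null ? "simple" : rawLockType).toLowerCase(Locale.ROOT).trim();
    final LockFactory factory;
    switch (lockType) {
      case "simple": factory = SimpleFSLockFactory.INSTANCE; break;
      case "native": factory = NativeFSLockFactory.INSTANCE; break;
      case "single": factory = new SingleInstanceLockFactory(); break;
      case "none":   factory = NoLockFactory.INSTANCE; break; // "recipe for disaster", as above
      default: throw new IllegalArgumentException("Unrecognized lockType: " + rawLockType);
    }
    return FSDirectory.open(indexPath, factory);
  }
}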

Example 8: execute

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
@Override
protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
    boolean batch = options.has(batchMode);

    Path translogPath = getTranslogPath(options);
    Path idxLocation = translogPath.getParent().resolve("index");

    if (Files.exists(translogPath) == false || Files.isDirectory(translogPath) == false) {
        throw new ElasticsearchException("translog directory [" + translogPath + "], must exist and be a directory");
    }

    if (Files.exists(idxLocation) == false || Files.isDirectory(idxLocation) == false) {
        throw new ElasticsearchException("unable to find a shard at [" + idxLocation + "], which must exist and be a directory");
    }

    // Hold the lock open for the duration of the tool running
    try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
            Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        Set<Path> translogFiles;
        try {
            terminal.println("Checking existing translog files");
            translogFiles = filesInDirectory(translogPath);
        } catch (IOException e) {
            terminal.println("encountered IOException while listing directory, aborting...");
            throw new ElasticsearchException("failed to find existing translog files", e);
        }

        // Warn about ES being stopped and files being deleted
        warnAboutDeletingFiles(terminal, translogFiles, batch);

        List<IndexCommit> commits;
        try {
            terminal.println("Reading translog UUID information from Lucene commit from shard at [" + idxLocation + "]");
            commits = DirectoryReader.listCommits(dir);
        } catch (IndexNotFoundException infe) {
            throw new ElasticsearchException("unable to find a valid shard at [" + idxLocation + "]", infe);
        }

        // Retrieve the generation and UUID from the existing data
        Map<String, String> commitData = commits.get(commits.size() - 1).getUserData();
        String translogGeneration = commitData.get(Translog.TRANSLOG_GENERATION_KEY);
        String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
        if (translogGeneration == null || translogUUID == null) {
            throw new ElasticsearchException("shard must have a valid translog generation and UUID but got: [{}] and: [{}]",
                    translogGeneration, translogUUID);
        }
        terminal.println("Translog Generation: " + translogGeneration);
        terminal.println("Translog UUID      : " + translogUUID);

        Path tempEmptyCheckpoint = translogPath.resolve("temp-" + Translog.CHECKPOINT_FILE_NAME);
        Path realEmptyCheckpoint = translogPath.resolve(Translog.CHECKPOINT_FILE_NAME);
        Path tempEmptyTranslog = translogPath.resolve("temp-" + Translog.TRANSLOG_FILE_PREFIX +
                        translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);
        Path realEmptyTranslog = translogPath.resolve(Translog.TRANSLOG_FILE_PREFIX +
                        translogGeneration + Translog.TRANSLOG_FILE_SUFFIX);

        // Write empty checkpoint and translog to empty files
        long gen = Long.parseLong(translogGeneration);
        int translogLen = writeEmptyTranslog(tempEmptyTranslog, translogUUID);
        writeEmptyCheckpoint(tempEmptyCheckpoint, translogLen, gen);

        terminal.println("Removing existing translog files");
        IOUtils.rm(translogFiles.toArray(new Path[]{}));

        terminal.println("Creating new empty checkpoint at [" + realEmptyCheckpoint + "]");
        Files.move(tempEmptyCheckpoint, realEmptyCheckpoint, StandardCopyOption.ATOMIC_MOVE);
        terminal.println("Creating new empty translog at [" + realEmptyTranslog + "]");
        Files.move(tempEmptyTranslog, realEmptyTranslog, StandardCopyOption.ATOMIC_MOVE);

        // Fsync the translog directory after rename
        IOUtils.fsync(translogPath, true);

    } catch (LockObtainFailedException lofe) {
        throw new ElasticsearchException("Failed to lock shard's directory at [" + idxLocation + "], is Elasticsearch still running?");
    }

    terminal.println("Done.");
}
 
Developer: justor, Project: elasticsearch_my, Lines: 79, Source: TruncateTranslogCommand.java

Example 9: start

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
@PostConstruct
public void start() {
	try {
		Analyzer analyzer = new TermNameAnalyzer(true);
		File indexPath = new File(path);
		
		if(!indexPath.exists()) {
			indexPath.mkdirs();
		}
		
		directory = new MMapDirectory(indexPath, new NativeFSLockFactory());
		if(MMapDirectory.UNMAP_SUPPORTED) {
			((MMapDirectory)directory).setUseUnmap(true);
		}
		boolean indexExists = IndexReader.indexExists(directory);
		
		writer = new IndexWriter(directory, analyzer, 
				new IndexWriter.MaxFieldLength(MAX_CHARS));
		
		if(!indexExists) {
			logger.info("Building ontology search index.");
			Collection<Term> terms = termDAO.loadAll();
			for(Term term : terms) {
				if(StatusChecker.isValid(term)) {
					Collection<Document> docs = createDocuments(term);
					for(Document doc : docs) {
						writer.addDocument(doc);
					}
				}
			}
		}
		writer.optimize();
		writer.commit();
		numberOfDocuments = writer.numDocs();
		xar = new LuceneIndexWriterXAResource(writer, this);
		reader = IndexReader.open(directory, true);
		searcher = new IndexSearcher(reader);
		rwlock = new ReentrantReadWriteLock();
	} catch(Exception e) {
		logger.log(Level.WARNING, "Failed to start Lucene term searcher", e);
		stop();
		throw new RuntimeException("Failed to start Lucene term searcher", e);
	}
}
 
Developer: Novartis, Project: ontobrowser, Lines: 45, Source: OntologySearchServiceImpl.java

Example 10: injectLockFactory

import org.apache.lucene.store.NativeFSLockFactory; // import the required package/class
private static Directory injectLockFactory(Directory dir, String lockPath,
    String rawLockType) throws IOException {
  if (null == rawLockType) {
    // we default to "simple" for backwards compatibility
    log.warn("No lockType configured for " + dir + " assuming 'simple'");
    rawLockType = "simple";
  }
  final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
  
  if ("simple".equals(lockType)) {
    // multiple SimpleFSLockFactory instances should be OK
    dir.setLockFactory(new SimpleFSLockFactory(lockPath));
  } else if ("native".equals(lockType)) {
    dir.setLockFactory(new NativeFSLockFactory(lockPath));
  } else if ("single".equals(lockType)) {
    if (!(dir.getLockFactory() instanceof SingleInstanceLockFactory)) dir
        .setLockFactory(new SingleInstanceLockFactory());
  } else if ("hdfs".equals(lockType)) {
    Directory del = dir;
    
    if (dir instanceof NRTCachingDirectory) {
      del = ((NRTCachingDirectory) del).getDelegate();
    }
    
    if (del instanceof BlockDirectory) {
      del = ((BlockDirectory) del).getDirectory();
    }
    
    if (!(del instanceof HdfsDirectory)) {
      throw new SolrException(ErrorCode.FORBIDDEN, "Directory: "
          + del.getClass().getName()
          + ", but hdfs lock factory can only be used with HdfsDirectory");
    }

    dir.setLockFactory(new HdfsLockFactory(((HdfsDirectory)del).getHdfsDirPath(), ((HdfsDirectory)del).getConfiguration()));
  } else if ("none".equals(lockType)) {
    // Recipe for disaster
    log.error("CONFIGURATION WARNING: locks are disabled on " + dir);
    dir.setLockFactory(NoLockFactory.getNoLockFactory());
  } else {
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        "Unrecognized lockType: " + rawLockType);
  }
  return dir;
}
 
Developer: europeana, Project: search, Lines: 46, Source: CachingDirectoryFactory.java


Note: The org.apache.lucene.store.NativeFSLockFactory examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult the corresponding project's license before using or redistributing the code, and do not republish this article without permission.