

Java EditLogFileInputStream.readOp Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.readOp. If you are wondering what EditLogFileInputStream.readOp does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore the broader usage of the enclosing class, org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.


Three code examples of EditLogFileInputStream.readOp are presented below, ordered by popularity by default.
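
Before the full test cases, here is a minimal sketch of the canonical readOp() loop: the method returns the next FSEditLogOp from the stream, or null once the end of the log is reached. The file path is a hypothetical placeholder, and the imports follow the Hadoop 2.7-era API used in Example 1 (later releases moved INVALID_TXID to HdfsServerConstants), so treat this as an illustrative sketch rather than version-exact code.

import java.io.File;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;

public class ReadOpSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical edits file; the transaction-ID bounds are unknown here,
    // so INVALID_TXID is passed for both, as Example 1 below also does.
    File edits = new File("/tmp/edits-segment");
    EditLogFileInputStream in = new EditLogFileInputStream(edits,
        HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
    try {
      FSEditLogOp op = in.readOp(); // null signals end of log
      while (op != null) {
        System.out.println(op);     // each op carries its opcode and txid
        op = in.readOp();
      }
    } finally {
      in.close();
    }
  }
}

All three examples below follow this same read-until-null pattern.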

Example 1: testPreserveEditLogs

import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; // import the class the method depends on
@Test
public void testPreserveEditLogs() throws Exception {
  conf = new HdfsConfiguration();
  conf = UpgradeUtilities.initializeStorageStateConf(1, conf);
  String[] nameNodeDirs = conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
  conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION, false);

  log("Normal NameNode upgrade", 1);
  File[] created =
      UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
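  // Move each existing edits file aside, then copy its ops one at a time
  // into a fresh file created under the previous (older) layout version.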
  for (final File createdDir : created) {
    List<String> fileNameList =
        IOUtils.listDirectory(createdDir, EditLogsFilter.INSTANCE);
    for (String fileName : fileNameList) {
      String tmpFileName = fileName + ".tmp";
      File existingFile = new File(createdDir, fileName);
      File tmpFile = new File(createdDir, tmpFileName);
      Files.move(existingFile.toPath(), tmpFile.toPath());
      File newFile = new File(createdDir, fileName);
      Preconditions.checkState(newFile.createNewFile(),
          "Cannot create new edits log file in " + createdDir);
      EditLogFileInputStream in = new EditLogFileInputStream(tmpFile,
          HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID,
          false);
      EditLogFileOutputStream out = new EditLogFileOutputStream(conf, newFile,
          (int)tmpFile.length());
      out.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION + 1); // layout versions are negative, so +1 is one version older
      FSEditLogOp logOp = in.readOp();
      while (logOp != null) {
        out.write(logOp);
        logOp = in.readOp();
      }
      out.setReadyToFlush();
      out.flushAndSync(true);
      out.close();
      Files.delete(tmpFile.toPath());
    }
  }

  cluster = createCluster();

  DFSInotifyEventInputStream ieis =
      cluster.getFileSystem().getInotifyEventStream(0);
  EventBatch batch = ieis.poll();
  Event[] events = batch.getEvents();
  assertTrue("Should be able to get transactions before the upgrade.",
      events.length > 0);
  assertEquals(Event.EventType.CREATE, events[0].getEventType());
  assertEquals("/TestUpgrade", ((CreateEvent) events[0]).getPath());
  cluster.shutdown();
  UpgradeUtilities.createEmptyDirs(nameNodeDirs);
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestDFSUpgrade.java
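
Example 1 exercises upgrade preservation: every edits file is replayed through readOp() into a replacement file created one layout version older, a cluster is then started on that storage, and the inotify event stream is checked to confirm the pre-upgrade CREATE transaction for /TestUpgrade is still readable.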

Example 2: testReadAndRefreshAfterEachTransactionInner

import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; // import the class the method depends on
private void testReadAndRefreshAfterEachTransactionInner(int numEdits)
  throws Exception {
  FSEditLog.sizeFlushBuffer = 100;
  LedgerHandle ledgerOut = createLedger();
  long ledgerId = ledgerOut.getId();
  BookKeeperEditLogOutputStream bkEditsOut =
      new BookKeeperEditLogOutputStream(ledgerOut);
  EditLogFileOutputStream fileEditsOut =
      new EditLogFileOutputStream(tempEditsFile, null);

  FSEditLogTestUtil.createAndPopulateStreams(1,
      numEdits, bkEditsOut, fileEditsOut);

  BookKeeperEditLogInputStream bkEditsIn =
      new BookKeeperEditLogInputStream(ledgerProvider,
          ledgerId,
          0,
          1,
          numEdits,
          false);

  EditLogFileInputStream fileEditsIn =
      new EditLogFileInputStream(tempEditsFile);

  assertEquals("Length in bytes must be equal!",
      bkEditsIn.length(), fileEditsIn.length());

  long lastBkPos = bkEditsIn.getPosition();
  long lastFilePos = fileEditsIn.getPosition();
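  // After each transaction: re-open both streams at the previously recorded
  // position, then verify readOp() yields identical ops from BookKeeper and file.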
  for (int i = 1; i <= numEdits; i++) {
    assertEquals("Position in file must be equal position in bk",
        lastBkPos, lastFilePos);
    bkEditsIn.refresh(lastBkPos, -1);
    fileEditsIn.refresh(lastFilePos, -1);
    FSEditLogOp opFromBk = bkEditsIn.readOp();
    FSEditLogOp opFromFile = fileEditsIn.readOp();
    if (LOG.isDebugEnabled()) {
      LOG.debug("txId = " + i + ", " + "opFromBk = " + opFromBk +
          ", opFromFile = " + opFromFile);
    }
    assertEquals(
        "Operation read from file and BookKeeper must be same after refresh",
        opFromBk, opFromFile);
    lastBkPos = bkEditsIn.getPosition();
    lastFilePos = fileEditsIn.getPosition();
  }
  assertNull("BookKeeper edit log must end at last txId", bkEditsIn.readOp());
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 49, Source: TestBookKeeperEditLogInputStream.java
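
Example 2 verifies that readOp() stays consistent across refresh(): after every transaction both streams are refreshed at the previously recorded getPosition() offset, and the op read from BookKeeper must equal the op read from the local edits file, with byte positions advancing in lockstep.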

Example 3: testReadBufferGreaterThanLedgerSizeInner

import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream; // import the class the method depends on
private void testReadBufferGreaterThanLedgerSizeInner(int numEdits)
    throws Exception {
  LedgerHandle ledgerOut = createLedger();
  long ledgerId = ledgerOut.getId();
  BookKeeperEditLogInputStream bkEditsIn =
      new BookKeeperEditLogInputStream(ledgerProvider,
          ledgerId,
          0,
          1,
          -1,
          true);
  EditLogFileOutputStream fileEditsOut =
      new EditLogFileOutputStream(tempEditsFile, null);
  bkEditsIn.init();
  // Set the edit log buffer flush size smaller than the size of the
  // buffer in BufferedInputStream in BookKeeperJournalInputStream
  FSEditLog.sizeFlushBuffer = bkEditsIn.bin.available() / 3;
  LOG.info("Set flush buffer size to " + FSEditLog.sizeFlushBuffer);

  BookKeeperEditLogOutputStream bkEditsOut =
      new BookKeeperEditLogOutputStream(ledgerOut);

  FSEditLogTestUtil.createAndPopulateStreams(1, numEdits, bkEditsOut,
      fileEditsOut);

  // Retry refreshing up to ten times until we are able to refresh
  // successfully to the beginning of the ledger and read the edit log
  // layout version
  int maxTries = 10;
  for (int i = 0; i < maxTries; i++) {
    try {
      bkEditsIn.refresh(0, -1);
      assertEquals("Log version must match after refresh",
          FSConstants.LAYOUT_VERSION, bkEditsIn.logVersion);
      break; // refreshed and validated successfully; stop retrying
    } catch (AssertionFailedError e) {
      if (i == maxTries - 1) {
        // Fail the unit test, rethrowing the assertion failure, once we've
        // exhausted the maximum number of retries (the original check
        // compared against maxTries itself, which a loop bounded by
        // i < maxTries can never reach)
        throw e;
      }
    }
  }
  EditLogFileInputStream fileEditsIn =
      new EditLogFileInputStream(tempEditsFile);
  for (int i = 0; i <= numEdits; i++) { // last iteration reads past the final op; both streams return null
    FSEditLogOp opFromBk = bkEditsIn.readOp();
    FSEditLogOp opFromFile = fileEditsIn.readOp();
    if (LOG.isDebugEnabled()) {
      LOG.debug("txId = " + i + ", " + "opFromBk = " + opFromBk +
          ", opFromFile = " + opFromFile);
    }
    assertEquals(
        "Operation read from file and BookKeeper must be same after refresh",
        opFromBk, opFromFile);
  }
  assertNull("BookKeeper edit log must end at txid 1000", bkEditsIn.readOp());
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 58, Source: TestBookKeeperEditLogInputStream.java
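
Example 3 covers the case named by the test: the read buffer inside BookKeeperJournalInputStream is larger than the ledger contents, with the write-side flush buffer set to a third of the available bytes. After retry-refreshing to the beginning of the ledger, readOp() must still return the same ops from BookKeeper and from the file, one for one.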


Note: The org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.readOp examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, who retain copyright over the source code; consult each project's License before distributing or reusing it. Do not republish without permission.