

Java StartupProgress.endPhase Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.endPhase. If you are wondering what StartupProgress.endPhase does, how to call it, or where to find real-world usages, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.


The following presents 10 code examples of the StartupProgress.endPhase method, ordered by popularity.
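Before the examples, here is a minimal sketch of how endPhase typically fits into the StartupProgress lifecycle: a phase is begun, its steps are begun and ended, and the phase is ended once when complete. This sketch assumes the Phase, Step, StepType, and Status types from the same startupprogress package; runSafeModePhase is a hypothetical helper for illustration, not code taken from the examples below.

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;

public class EndPhaseSketch {
  // Hypothetical helper: drives the SAFEMODE phase from begin to end.
  static void runSafeModePhase(StartupProgress prog) {
    Step step = new Step(StepType.AWAITING_REPORTED_BLOCKS);
    prog.beginPhase(Phase.SAFEMODE);
    prog.beginStep(Phase.SAFEMODE, step);

    // ... wait for the required fraction of blocks to be reported ...

    prog.endStep(Phase.SAFEMODE, step);
    // Guard against ending an already-completed phase, as the examples below do.
    if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
      prog.endPhase(Phase.SAFEMODE);
    }
  }
}

Once endPhase has been called, getStatus for that phase reports Status.COMPLETE, which is exactly the guard the FSNamesystem and FSImage examples below rely on.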

Example 1: leave

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
/**
 * Leave safe mode.
 * <p>
 * Check for invalid, under- & over-replicated blocks in the end of startup.
 */
private synchronized void leave() {
  // if not done yet, initialize replication queues.
  // In the standby, do not populate repl queues
  if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
    initializeReplQueues();
  }
  long timeInSafemode = now() - startTime;
  NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                + timeInSafemode/1000 + " secs");
  NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);

  //Log the following only once (when transitioning from ON -> OFF)
  if (reached >= 0) {
    NameNode.stateChangeLog.info("STATE* Safe mode is OFF"); 
  }
  reached = -1;
  reachedTimestamp = -1;
  safeMode = null;
  final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
  NameNode.stateChangeLog.info("STATE* Network topology has "
      + nt.getNumOfRacks() + " racks and "
      + nt.getNumOfLeaves() + " datanodes");
  NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
      + blockManager.numOfUnderReplicatedBlocks() + " blocks");

  startSecretManagerIfNecessary();

  // If startup has not yet completed, end safemode phase.
  StartupProgress prog = NameNode.getStartupProgress();
  if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
    prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
    prog.endPhase(Phase.SAFEMODE);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source file: FSNamesystem.java

Example 2: loadEdits

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
private long loadEdits(Iterable<EditLogInputStream> editStreams,
    FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
    throws IOException {
  LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginPhase(Phase.LOADING_EDITS);
  
  long prevLastAppliedTxId = lastAppliedTxId;  
  try {    
    FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
    
    // Load latest edits
    for (EditLogInputStream editIn : editStreams) {
      LOG.info("Reading " + editIn + " expecting start txid #" +
            (lastAppliedTxId + 1));
      try {
        loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
      } finally {
        // Update lastAppliedTxId even in case of error, since some ops may
        // have been successfully applied before the error.
        lastAppliedTxId = loader.getLastAppliedTxId();
      }
      // If we are in recovery mode, we may have skipped over some txids.
      if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) {
        lastAppliedTxId = editIn.getLastTxId();
      }
    }
  } finally {
    FSEditLog.closeAllStreams(editStreams);
    // update the counts
    updateCountForQuota(target.getBlockManager().getStoragePolicySuite(),
        target.dir.rootDir);
  }
  prog.endPhase(Phase.LOADING_EDITS);
  return lastAppliedTxId - prevLastAppliedTxId;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source file: FSImage.java

Example 3: loadEdits

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
private long loadEdits(Iterable<EditLogInputStream> editStreams,
    FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
    throws IOException {
  LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginPhase(Phase.LOADING_EDITS);
  
  long prevLastAppliedTxId = lastAppliedTxId;  
  try {    
    FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
    
    // Load latest edits
    for (EditLogInputStream editIn : editStreams) {
      LOG.info("Reading " + editIn + " expecting start txid #" +
            (lastAppliedTxId + 1));
      try {
        loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
      } finally {
        // Update lastAppliedTxId even in case of error, since some ops may
        // have been successfully applied before the error.
        lastAppliedTxId = loader.getLastAppliedTxId();
      }
      // If we are in recovery mode, we may have skipped over some txids.
      if (editIn.getLastTxId() != HdfsServerConstants.INVALID_TXID) {
        lastAppliedTxId = editIn.getLastTxId();
      }
    }
  } finally {
    FSEditLog.closeAllStreams(editStreams);
  }
  prog.endPhase(Phase.LOADING_EDITS);
  return lastAppliedTxId - prevLastAppliedTxId;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 34, Source file: FSImage.java

Example 4: leave

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
/**
 * Leave safe mode.
 * <p>
 * Check for invalid, under- & over-replicated blocks in the end of startup.
 */
private synchronized void leave() {
  // if not done yet, initialize replication queues.
  // In the standby, do not populate repl queues
  if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
    initializeReplQueues();
  }
  long timeInSafemode = now() - startTime;
  NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                + timeInSafemode/1000 + " secs");
  NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);

  //Log the following only once (when transitioning from ON -> OFF)
  if (reached >= 0) {
    NameNode.stateChangeLog.info("STATE* Safe mode is OFF"); 
  }
  reached = -1;
  safeMode = null;
  final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
  NameNode.stateChangeLog.info("STATE* Network topology has "
      + nt.getNumOfRacks() + " racks and "
      + nt.getNumOfLeaves() + " datanodes");
  NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
      + blockManager.numOfUnderReplicatedBlocks() + " blocks");

  startSecretManagerIfNecessary();

  // If startup has not yet completed, end safemode phase.
  StartupProgress prog = NameNode.getStartupProgress();
  if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
    prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
    prog.endPhase(Phase.SAFEMODE);
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 39, Source file: FSNamesystem.java

Example 5: loadEdits

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
private long loadEdits(Iterable<EditLogInputStream> editStreams,
    FSNamesystem target, StartupOption startOpt, MetaRecoveryContext recovery)
    throws IOException {
  LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginPhase(Phase.LOADING_EDITS);
  
  long prevLastAppliedTxId = lastAppliedTxId;  
  try {    
    FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
    
    // Load latest edits
    for (EditLogInputStream editIn : editStreams) {
      LOG.info("Reading " + editIn + " expecting start txid #" +
            (lastAppliedTxId + 1));
      try {
        loader.loadFSEdits(editIn, lastAppliedTxId + 1, startOpt, recovery);
      } finally {
        // Update lastAppliedTxId even in case of error, since some ops may
        // have been successfully applied before the error.
        lastAppliedTxId = loader.getLastAppliedTxId();
      }
      // If we are in recovery mode, we may have skipped over some txids.
      if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) {
        lastAppliedTxId = editIn.getLastTxId();
      }
    }
  } finally {
    FSEditLog.closeAllStreams(editStreams);
    // update the counts
    updateCountForQuota(target.dir.rootDir);
  }
  prog.endPhase(Phase.LOADING_EDITS);
  return lastAppliedTxId - prevLastAppliedTxId;
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 36, Source file: FSImage.java

Example 6: loadFSImage

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
void loadFSImage(StartupOption startOpt, FSImage fsImage, boolean haEnabled)
    throws IOException {
  // format before starting up if requested
  if (startOpt == StartupOption.FORMAT) {
    
    fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id

    startOpt = StartupOption.REGULAR;
  }
  boolean success = false;
  writeLock();
  try {
    // We shouldn't be calling saveNamespace if we've come up in standby state.
    MetaRecoveryContext recovery = startOpt.createRecoveryContext();
    boolean needToSave =
      fsImage.recoverTransitionRead(startOpt, this, recovery) && !haEnabled;
    if (needToSave) {
      fsImage.saveNamespace(this);
    } else {
      // No need to save, so mark the phase done.
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginPhase(Phase.SAVING_CHECKPOINT);
      prog.endPhase(Phase.SAVING_CHECKPOINT);
    }
    // This will start a new log segment and write to the seen_txid file, so
    // we shouldn't do it when coming up in standby state
    if (!haEnabled) {
      fsImage.openEditLogForWrite();
    }
    success = true;
  } finally {
    if (!success) {
      fsImage.close();
    }
    writeUnlock();
  }
  dir.imageLoadComplete();
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 39, Source file: FSNamesystem.java

Example 7: loadEdits

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
/**
 * Load the specified list of edit files into the image.
 */
public long loadEdits(Iterable<EditLogInputStream> editStreams,
    FSNamesystem target, MetaRecoveryContext recovery) throws IOException {
  LOG.debug("About to load edits:\n  " + Joiner.on("\n  ").join(editStreams));
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginPhase(Phase.LOADING_EDITS);
  
  long prevLastAppliedTxId = lastAppliedTxId;  
  try {    
    FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId);
    
    // Load latest edits
    for (EditLogInputStream editIn : editStreams) {
      LOG.info("Reading " + editIn + " expecting start txid #" +
            (lastAppliedTxId + 1));
      try {
        loader.loadFSEdits(editIn, lastAppliedTxId + 1, recovery);
      } finally {
        // Update lastAppliedTxId even in case of error, since some ops may
        // have been successfully applied before the error.
        lastAppliedTxId = loader.getLastAppliedTxId();
      }
      // If we are in recovery mode, we may have skipped over some txids.
      if (editIn.getLastTxId() != HdfsConstants.INVALID_TXID) {
        lastAppliedTxId = editIn.getLastTxId();
      }
    }
  } finally {
    FSEditLog.closeAllStreams(editStreams);
    // update the counts
    updateCountForQuota(target.dir.rootDir);   
  }
  prog.endPhase(Phase.LOADING_EDITS);
  return lastAppliedTxId - prevLastAppliedTxId;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 38, Source file: FSImage.java

Example 8: loadFSImage

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
private void loadFSImage(StartupOption startOpt) throws IOException {
  final FSImage fsImage = getFSImage();

  // format before starting up if requested
  if (startOpt == StartupOption.FORMAT) {
    
    fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id

    startOpt = StartupOption.REGULAR;
  }
  boolean success = false;
  writeLock();
  try {
    // We shouldn't be calling saveNamespace if we've come up in standby state.
    MetaRecoveryContext recovery = startOpt.createRecoveryContext();
    final boolean staleImage
        = fsImage.recoverTransitionRead(startOpt, this, recovery);
    if (RollingUpgradeStartupOption.ROLLBACK.matches(startOpt) ||
        RollingUpgradeStartupOption.DOWNGRADE.matches(startOpt)) {
      rollingUpgradeInfo = null;
    }
    final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); 
    LOG.info("Need to save fs image? " + needToSave
        + " (staleImage=" + staleImage + ", haEnabled=" + haEnabled
        + ", isRollingUpgrade=" + isRollingUpgrade() + ")");
    if (needToSave) {
      fsImage.saveNamespace(this);
    } else {
      updateStorageVersionForRollingUpgrade(fsImage.getLayoutVersion(),
          startOpt);
      // No need to save, so mark the phase done.
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginPhase(Phase.SAVING_CHECKPOINT);
      prog.endPhase(Phase.SAVING_CHECKPOINT);
    }
    // This will start a new log segment and write to the seen_txid file, so
    // we shouldn't do it when coming up in standby state
    if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)
        || (haEnabled && startOpt == StartupOption.UPGRADEONLY)) {
      fsImage.openEditLogForWrite();
    }
    success = true;
  } finally {
    if (!success) {
      fsImage.close();
    }
    writeUnlock();
  }
  imageLoadComplete();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 51, Source file: FSNamesystem.java

Example 9: loadFSImage

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
private void loadFSImage(StartupOption startOpt) throws IOException {
  final FSImage fsImage = getFSImage();

  // format before starting up if requested
  if (startOpt == StartupOption.FORMAT) {
    
    fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id

    startOpt = StartupOption.REGULAR;
  }
  boolean success = false;
  writeLock();
  try {
    // We shouldn't be calling saveNamespace if we've come up in standby state.
    MetaRecoveryContext recovery = startOpt.createRecoveryContext();
    final boolean staleImage
        = fsImage.recoverTransitionRead(startOpt, this, recovery);
    if (RollingUpgradeStartupOption.ROLLBACK.matches(startOpt)) {
      rollingUpgradeInfo = null;
    }
    final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); 
    LOG.info("Need to save fs image? " + needToSave
        + " (staleImage=" + staleImage + ", haEnabled=" + haEnabled
        + ", isRollingUpgrade=" + isRollingUpgrade() + ")");
    if (needToSave) {
      fsImage.saveNamespace(this);
    } else {
      // No need to save, so mark the phase done.
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginPhase(Phase.SAVING_CHECKPOINT);
      prog.endPhase(Phase.SAVING_CHECKPOINT);
    }
    // This will start a new log segment and write to the seen_txid file, so
    // we shouldn't do it when coming up in standby state
    if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)
        || (haEnabled && startOpt == StartupOption.UPGRADEONLY)) {
      fsImage.openEditLogForWrite(getEffectiveLayoutVersion());
    }
    success = true;
  } finally {
    if (!success) {
      fsImage.close();
    }
    writeUnlock();
  }
  imageLoadComplete();
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 48, Source file: FSNamesystem.java

Example 10: loadFSImage

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the class this method depends on
private void loadFSImage(StartupOption startOpt) throws IOException {
  final FSImage fsImage = getFSImage();

  // format before starting up if requested
  if (startOpt == StartupOption.FORMAT) {
    
    fsImage.format(this, fsImage.getStorage().determineClusterId());// reuse current id

    startOpt = StartupOption.REGULAR;
  }
  boolean success = false;
  writeLock();
  try {
    // We shouldn't be calling saveNamespace if we've come up in standby state.
    MetaRecoveryContext recovery = startOpt.createRecoveryContext();
    final boolean staleImage
        = fsImage.recoverTransitionRead(startOpt, this, recovery);
    if (RollingUpgradeStartupOption.ROLLBACK.matches(startOpt)) {
      rollingUpgradeInfo = null;
    }
    final boolean needToSave = staleImage && !haEnabled && !isRollingUpgrade(); 
    LOG.info("Need to save fs image? " + needToSave
        + " (staleImage=" + staleImage + ", haEnabled=" + haEnabled
        + ", isRollingUpgrade=" + isRollingUpgrade() + ")");
    if (needToSave) {
      fsImage.saveNamespace(this);
    } else {
      // No need to save, so mark the phase done.
      StartupProgress prog = NameNode.getStartupProgress();
      prog.beginPhase(Phase.SAVING_CHECKPOINT);
      prog.endPhase(Phase.SAVING_CHECKPOINT);
    }
    // This will start a new log segment and write to the seen_txid file, so
    // we shouldn't do it when coming up in standby state
    if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)) {
      fsImage.openEditLogForWrite();
    }
    success = true;
  } finally {
    if (!success) {
      fsImage.close();
    }
    writeUnlock();
  }
  dir.imageLoadComplete();
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 47, Source file: FSNamesystem.java


Note: The org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.endPhase examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers, and the copyright of the source code belongs to the original authors. Consult each project's license before distributing or using the code; do not reproduce this article without permission.