

Java StartupProgress.endStep Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.endStep. If you are wondering what StartupProgress.endStep does and how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.


Fifteen code examples of StartupProgress.endStep are shown below, ordered by popularity by default.
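
All fifteen examples follow the same instrumentation pattern, and endStep is always its closing call: begin a Step within a Phase, declare a total, increment a Counter once per processed item, then end the step. The following minimal sketch distills that pattern; the LoadExample class, the loadThings method, and the item count are hypothetical stand-ins, while every StartupProgress call is one used in the examples below. Placing endStep in a finally block (as Example 5 does) keeps the reported progress consistent even if loading fails.

import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;

class LoadExample { // hypothetical class, for illustration only
  void loadThings(int numberOfThings) { // hypothetical helper
    StartupProgress prog = NameNode.getStartupProgress();
    Step step = new Step(StepType.DELEGATION_KEYS); // choose the StepType matching the work
    prog.beginStep(Phase.LOADING_FSIMAGE, step);    // mark the step as running
    prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfThings); // declare the expected item count
    Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
    try {
      for (int i = 0; i < numberOfThings; i++) {
        // ... process one item here ...
        counter.increment(); // advance the visible progress by one
      }
    } finally {
      prog.endStep(Phase.LOADING_FSIMAGE, step); // always close the step, even on failure
    }
  }
}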

Example 1: saveCurrentTokens

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper method to save delegation tokens to fsimage.
 */
private synchronized void saveCurrentTokens(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, currentTokens.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(currentTokens.size());
  Iterator<DelegationTokenIdentifier> iter = currentTokens.keySet()
      .iterator();
  while (iter.hasNext()) {
    DelegationTokenIdentifier id = iter.next();
    id.write(out);
    DelegationTokenInformation info = currentTokens.get(id);
    out.writeLong(info.getRenewDate());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: DelegationTokenSecretManager.java

Example 2: saveAllKeys

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
private synchronized void saveAllKeys(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, allKeys.size()); // total must be the key count, not the token count
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(allKeys.size());
  Iterator<Integer> iter = allKeys.keySet().iterator();
  while (iter.hasNext()) {
    Integer key = iter.next();
    allKeys.get(key).write(out);
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Developer: naver, Project: hadoop, Lines: 17, Source: DelegationTokenSecretManager.java

Example 3: loadCurrentTokens

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper method to load delegation tokens from fsimage.
 */
private synchronized void loadCurrentTokens(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_TOKENS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfTokens = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfTokens);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfTokens; i++) {
    DelegationTokenIdentifier id = new DelegationTokenIdentifier();
    id.readFields(in);
    long expiryTime = in.readLong();
    addPersistedDelegationToken(id, expiryTime);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: DelegationTokenSecretManager.java

Example 4: loadAllKeys

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper method to load delegation keys from fsimage.
 * @throws IOException on error
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: DelegationTokenSecretManager.java

Example 5: loadFSEdits

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load an edit log and apply the changes to the in-memory structure.
 * This is where we apply the edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = monotonicNow();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName() 
        + " of size " + edits.length() + " edits # " + numEdits 
        + " loaded in " + (monotonicNow()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: FSEditLogLoader.java

Example 6: loadDirectives

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load cache directives from the fsimage
 */
private void loadDirectives(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numDirectives = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numDirectives);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numDirectives; i++) {
    CacheDirectiveInfo info = FSImageSerialization.readCacheDirectiveInfo(in);
    // Get pool reference by looking it up in the map
    final String poolName = info.getPool();
    CacheDirective directive =
        new CacheDirective(info.getId(), info.getPath().toUri().getPath(),
            info.getReplication(), info.getExpiration().getAbsoluteMillis());
    addCacheDirective(poolName, directive);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: CacheManager.java

Example 7: loadFSEdits

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load an edit log and apply the changes to the in-memory structure.
 * This is where we apply the edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    StartupOption startOpt, MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = now();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, expectedStartingTxId,
        startOpt, recovery);
    FSImage.LOG.info("Edits file " + edits.getName() 
        + " of size " + edits.length() + " edits # " + numEdits 
        + " loaded in " + (now()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 27, Source: FSEditLogLoader.java

Example 8: loadAllKeys

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Private helper method to load delegation keys from fsimage.
 * @param in input stream positioned at the serialized keys
 * @throws IOException on error reading from the stream
 */
private synchronized void loadAllKeys(DataInput in) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.DELEGATION_KEYS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfKeys = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfKeys);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfKeys; i++) {
    DelegationKey value = new DelegationKey();
    value.readFields(in);
    addKey(value);
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 21, Source: DelegationTokenSecretManager.java

Example 9: loadFSEdits

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load an edit log and apply the changes to the in-memory structure.
 * This is where we apply the edits that we've been writing to disk all
 * along.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId,
    MetaRecoveryContext recovery) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = createStartupProgressStep(edits);
  prog.beginStep(Phase.LOADING_EDITS, step);
  fsNamesys.writeLock();
  try {
    long startTime = now();
    FSImage.LOG.info("Start loading edits file " + edits.getName());
    long numEdits = loadEditRecords(edits, false, 
                               expectedStartingTxId, recovery);
    FSImage.LOG.info("Edits file " + edits.getName() 
        + " of size " + edits.length() + " edits # " + numEdits 
        + " loaded in " + (now()-startTime)/1000 + " seconds");
    return numEdits;
  } finally {
    edits.close();
    fsNamesys.writeUnlock();
    prog.endStep(Phase.LOADING_EDITS, step);
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 27, Source: FSEditLogLoader.java

Example 10: leave

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Leave safe mode.
 * <p>
 * Check for invalid, under- & over-replicated blocks at the end of startup.
 */
private synchronized void leave() {
  // if not done yet, initialize replication queues.
  // In the standby, do not populate repl queues
  if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
    initializeReplQueues();
  }
  long timeInSafemode = now() - startTime;
  NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                + timeInSafemode/1000 + " secs");
  NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);

  //Log the following only once (when transitioning from ON -> OFF)
  if (reached >= 0) {
    NameNode.stateChangeLog.info("STATE* Safe mode is OFF"); 
  }
  reached = -1;
  reachedTimestamp = -1;
  safeMode = null;
  final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
  NameNode.stateChangeLog.info("STATE* Network topology has "
      + nt.getNumOfRacks() + " racks and "
      + nt.getNumOfLeaves() + " datanodes");
  NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
      + blockManager.numOfUnderReplicatedBlocks() + " blocks");

  startSecretManagerIfNecessary();

  // If startup has not yet completed, end safemode phase.
  StartupProgress prog = NameNode.getStartupProgress();
  if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
    prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
    prog.endPhase(Phase.SAFEMODE);
  }
}
 
Developer: naver, Project: hadoop, Lines: 40, Source: FSNamesystem.java

Example 11: savePools

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Save cache pools to fsimage
 */
private void savePools(DataOutputStream out,
    String sdPath) throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, cachePools.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(cachePools.size());
  for (CachePool pool: cachePools.values()) {
    FSImageSerialization.writeCachePoolInfo(out, pool.getInfo(true));
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: CacheManager.java

Example 12: saveDirectives

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
private void saveDirectives(DataOutputStream out, String sdPath)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_ENTRIES, sdPath);
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step, directivesById.size());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  out.writeInt(directivesById.size());
  for (CacheDirective directive : directivesById.values()) {
    FSImageSerialization.writeCacheDirectiveInfo(out, directive.toInfo());
    counter.increment();
  }
  prog.endStep(Phase.SAVING_CHECKPOINT, step);
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: CacheManager.java

Example 13: loadPools

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Load cache pools from fsimage
 */
private void loadPools(DataInput in)
    throws IOException {
  StartupProgress prog = NameNode.getStartupProgress();
  Step step = new Step(StepType.CACHE_POOLS);
  prog.beginStep(Phase.LOADING_FSIMAGE, step);
  int numberOfPools = in.readInt();
  prog.setTotal(Phase.LOADING_FSIMAGE, step, numberOfPools);
  Counter counter = prog.getCounter(Phase.LOADING_FSIMAGE, step);
  for (int i = 0; i < numberOfPools; i++) {
    addCachePool(FSImageSerialization.readCachePoolInfo(in));
    counter.increment();
  }
  prog.endStep(Phase.LOADING_FSIMAGE, step);
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: CacheManager.java

Example 14: leave

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
/**
 * Leave safe mode.
 * <p>
 * Check for invalid, under- & over-replicated blocks at the end of startup.
 */
private synchronized void leave() {
  // if not done yet, initialize replication queues.
  // In the standby, do not populate repl queues
  if (!isPopulatingReplQueues() && shouldPopulateReplQueues()) {
    initializeReplQueues();
  }
  long timeInSafemode = now() - startTime;
  NameNode.stateChangeLog.info("STATE* Leaving safe mode after " 
                                + timeInSafemode/1000 + " secs");
  NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);

  //Log the following only once (when transitioning from ON -> OFF)
  if (reached >= 0) {
    NameNode.stateChangeLog.info("STATE* Safe mode is OFF"); 
  }
  reached = -1;
  safeMode = null;
  final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
  NameNode.stateChangeLog.info("STATE* Network topology has "
      + nt.getNumOfRacks() + " racks and "
      + nt.getNumOfLeaves() + " datanodes");
  NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
      + blockManager.numOfUnderReplicatedBlocks() + " blocks");

  startSecretManagerIfNecessary();

  // If startup has not yet completed, end safemode phase.
  StartupProgress prog = NameNode.getStartupProgress();
  if (prog.getStatus(Phase.SAFEMODE) != Status.COMPLETE) {
    prog.endStep(Phase.SAFEMODE, STEP_AWAITING_REPORTED_BLOCKS);
    prog.endPhase(Phase.SAFEMODE);
  }
}
 
Developer: yncxcw, Project: FlexMap, Lines: 39, Source: FSNamesystem.java

Example 15: save

import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; // import the package/class the method depends on
void save(File newFile, FSImageCompression compression) throws IOException {
  checkNotSaved();

  final FSNamesystem sourceNamesystem = context.getSourceNamesystem();
  FSDirectory fsDir = sourceNamesystem.dir;
  String sdPath = newFile.getParentFile().getParentFile().getAbsolutePath();
  Step step = new Step(StepType.INODES, sdPath);
  StartupProgress prog = NameNode.getStartupProgress();
  prog.beginStep(Phase.SAVING_CHECKPOINT, step);
  prog.setTotal(Phase.SAVING_CHECKPOINT, step,
    fsDir.rootDir.numItemsInTree());
  Counter counter = prog.getCounter(Phase.SAVING_CHECKPOINT, step);
  long startTime = now();
  //
  // Write out data
  //
  MessageDigest digester = MD5Hash.getDigester();
  FileOutputStream fout = new FileOutputStream(newFile);
  DigestOutputStream fos = new DigestOutputStream(fout, digester);
  DataOutputStream out = new DataOutputStream(fos);
  try {
    out.writeInt(HdfsConstants.LAYOUT_VERSION);
    // We use the non-locked version of getNamespaceInfo here since
    // the coordinating thread of saveNamespace already has read-locked
    // the namespace for us. If we attempt to take another readlock
    // from the actual saver thread, there's a potential of a
    // fairness-related deadlock. See the comments on HDFS-2223.
    out.writeInt(sourceNamesystem.unprotectedGetNamespaceInfo()
        .getNamespaceID());
    out.writeLong(fsDir.rootDir.numItemsInTree());
    out.writeLong(sourceNamesystem.getGenerationStampV1());
    out.writeLong(sourceNamesystem.getGenerationStampV2());
    out.writeLong(sourceNamesystem.getGenerationStampAtblockIdSwitch());
    out.writeLong(sourceNamesystem.getLastAllocatedBlockId());
    out.writeLong(context.getTxId());
    out.writeLong(sourceNamesystem.getLastInodeId());

    
    sourceNamesystem.getSnapshotManager().write(out);
    
    // write compression info and set up compressed stream
    out = compression.writeHeaderAndWrapStream(fos);
    LOG.info("Saving image file " + newFile +
             " using " + compression);

    // save the root
    saveINode2Image(fsDir.rootDir, out, false, referenceMap, counter);
    // save the rest of the nodes
    saveImage(fsDir.rootDir, out, true, counter);
    prog.endStep(Phase.SAVING_CHECKPOINT, step);
    // Now that the step is finished, set counter equal to total to adjust
    // for possible under-counting due to reference inodes.
    prog.setCount(Phase.SAVING_CHECKPOINT, step,
      fsDir.rootDir.numItemsInTree());
    // save files under construction
    sourceNamesystem.saveFilesUnderConstruction(out);
    context.checkCancelled();
    sourceNamesystem.saveSecretManagerState(out, sdPath);
    context.checkCancelled();
    out.flush();
    context.checkCancelled();
    fout.getChannel().force(true);
  } finally {
    out.close();
  }

  saved = true;
  // set md5 of the saved image
  savedDigest = new MD5Hash(digester.digest());

  LOG.info("Image file " + newFile + " of size " + newFile.length() +
      " bytes saved in " + (now() - startTime)/1000 + " seconds.");
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 74, Source: FSImageFormat.java
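
Example 15 also shows setCount being called after endStep to correct the final tally for under-counted reference inodes. On the consumer side, progress recorded through these calls is normally read back via an immutable snapshot rather than from StartupProgress directly. The sketch below is a hedged illustration: it assumes the createView() method and StartupProgressView class that ship in the same startupprogress package (verify the exact accessors against your Hadoop version), and the ProgressReport class and reportProgress method are hypothetical helpers.

import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressView;

class ProgressReport { // hypothetical class, for illustration only
  static void reportProgress(StartupProgress prog) {
    StartupProgressView view = prog.createView(); // assumed: immutable snapshot of all phases and steps
    for (Phase phase : Phase.values()) {
      System.out.println(phase + ": " + view.getStatus(phase)
          + " (" + (int) (view.getPercentComplete(phase) * 100) + "%)");
    }
  }
}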


Note: The org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.endStep examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, refer to each project's license. Do not repost without permission.