

Java RemoteEditLog.getEndTxId Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.protocol.RemoteEditLog.getEndTxId. If you are wondering what getEndTxId does, how to call it, or what real-world invocations look like, the selected examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.protocol.RemoteEditLog.


Eight code examples of the RemoteEditLog.getEndTxId method are shown below, ordered by popularity.
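Before the examples, here is a minimal sketch of how getEndTxId is typically consumed: a RemoteEditLog describes one finalized edit-log segment by its start and end transaction IDs, and a RemoteEditLogManifest is an ordered list of such segments. The sketch assumes the two-argument RemoteEditLog(startTxId, endTxId) constructor from stock Hadoop; the forks quoted in the examples below may differ slightly.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

public class RemoteEditLogEndTxIdDemo {
  public static void main(String[] args) {
    // Two finalized edit-log segments, each identified by [startTxId, endTxId].
    List<RemoteEditLog> logs = Arrays.asList(
        new RemoteEditLog(1L, 100L),
        new RemoteEditLog(101L, 250L));
    RemoteEditLogManifest manifest = new RemoteEditLogManifest(logs);

    // The highest endTxId tells us how far the available edits reach.
    long lastTxId = 0;
    for (RemoteEditLog log : manifest.getLogs()) {
      lastTxId = Math.max(lastTxId, log.getEndTxId());
    }
    System.out.println("Edits available up to transaction " + lastTxId);
  }
}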

Example 1: rollForwardByApplyingLogs

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
static void rollForwardByApplyingLogs(
    RemoteEditLogManifest manifest,
    FSImage dstImage,
    FSNamesystem dstNamesystem) throws IOException {
  NNStorage dstStorage = dstImage.getStorage();

  List<EditLogInputStream> editsStreams = Lists.newArrayList();    
  for (RemoteEditLog log : manifest.getLogs()) {
    if (log.getEndTxId() > dstImage.getLastAppliedTxId()) {
      File f = dstStorage.findFinalizedEditsFile(
          log.getStartTxId(), log.getEndTxId());
      editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(), 
                                                  log.getEndTxId(), true));
    }
  }
  LOG.info("Checkpointer about to load edits from " +
      editsStreams.size() + " stream(s).");
  dstImage.loadEdits(editsStreams, dstNamesystem);
}
 
Contributor: naver, Project: hadoop, Lines: 20, Source: Checkpointer.java

Example 2: rollForwardByApplyingLogs

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
static void rollForwardByApplyingLogs(
    RemoteEditLogManifest manifest,
    FSImage dstImage,
    FSNamesystem dstNamesystem) throws IOException {
  NNStorage dstStorage = dstImage.getStorage();

  List<EditLogInputStream> editsStreams = Lists.newArrayList();    
  for (RemoteEditLog log : manifest.getLogs()) {
    if (log.getEndTxId() > dstImage.getLastAppliedTxId()) {
      File f = dstStorage.findFinalizedEditsFile(
          log.getStartTxId(), log.getEndTxId());
      editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(), 
                                                  log.getEndTxId(), true));
    }
  }
  LOG.info("Checkpointer about to load edits from " +
      editsStreams.size() + " stream(s).");
  dstImage.loadEdits(editsStreams, dstNamesystem, null);
}
 
Contributor: ict-carch, Project: hadoop-plus, Lines: 20, Source: Checkpointer.java

Example 3: getParamStringForLog

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
static String getParamStringForLog(RemoteEditLog log,
    StorageInfo remoteStorageInfo) {
  return "getedit=1&" + START_TXID_PARAM + "=" + log.getStartTxId()
      + "&" + END_TXID_PARAM + "=" + log.getEndTxId()
      + "&" + STORAGEINFO_PARAM + "=" +
        remoteStorageInfo.toColonSeparatedString();
}
 
Contributor: naver, Project: hadoop, Lines: 8, Source: ImageServlet.java

Example 4: getParamStringForLog

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
static String getParamStringForLog(RemoteEditLog log,
    StorageInfo remoteStorageInfo, boolean throttle) {
  return "getedit" + "=1&" + START_TXID_PARAM + "=" + log.getStartTxId()
      + "&" + END_TXID_PARAM + "=" + log.getEndTxId()
      + "&" + STORAGEINFO_PARAM + "=" +
        remoteStorageInfo.toColonSeparatedString()
      + "&" + THROTTLE_PARAM + "=" + throttle;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 9, Source: GetImageServlet.java
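Both variants of getParamStringForLog assemble the HTTP query string a downloading node sends to fetch one finalized edit segment, and the segment's bounds come straight from getStartTxId() and getEndTxId(). A standalone sketch of the resulting string follows; the parameter-name values here are hypothetical, since the real constants are defined in the servlet classes shown above.

public class GetEditParamDemo {
  // Hypothetical parameter names; the real values live in the servlet's constants.
  static final String START_TXID_PARAM = "startTxId";
  static final String END_TXID_PARAM = "endTxId";
  static final String STORAGEINFO_PARAM = "storageInfo";

  static String paramStringForLog(long startTxId, long endTxId, String storageInfo) {
    // Mirrors the construction in the examples above: one query string per segment.
    return "getedit=1&" + START_TXID_PARAM + "=" + startTxId
        + "&" + END_TXID_PARAM + "=" + endTxId
        + "&" + STORAGEINFO_PARAM + "=" + storageInfo;
  }

  public static void main(String[] args) {
    // Prints e.g. getedit=1&startTxId=101&endTxId=250&storageInfo=-60:12345:0:CID-example
    System.out.println(paramStringForLog(101L, 250L, "-60:12345:0:CID-example"));
  }
}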

Example 5: getEditLogManifest

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
/**
 * Return a manifest of what finalized edit logs are available. All available
 * edit logs are returned starting from the transaction id passed. If
 * 'fromTxId' falls in the middle of a log, that log is returned as well.
 * 
 * @param fromTxId Starting transaction id to read the logs.
 * @return RemoteEditLogManifest object.
 */
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
  // Collect RemoteEditLogs available from each FileJournalManager
  List<RemoteEditLog> allLogs = Lists.newArrayList();
  for (JournalAndStream j : journals) {
    if (j.getManager() instanceof FileJournalManager) {
      FileJournalManager fjm = (FileJournalManager)j.getManager();
      try {
        allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, false));
      } catch (Throwable t) {
        LOG.warn("Cannot list edit logs in " + fjm, t);
      }
    }
  }
  
  // Group logs by their starting txid
  ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
    Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
  long curStartTxId = fromTxId;

  List<RemoteEditLog> logs = Lists.newArrayList();
  while (true) {
    ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
    if (logGroup.isEmpty()) {
      // we have a gap in logs - for example because we recovered some old
      // storage directory with ancient logs. Clear out any logs we've
      // accumulated so far, and then skip to the next segment of logs
      // after the gap.
      SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
      startTxIds = startTxIds.tailSet(curStartTxId);
      if (startTxIds.isEmpty()) {
        break;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found gap in logs at " + curStartTxId + ": " +
              "not returning previous logs in manifest.");
        }
        logs.clear();
        curStartTxId = startTxIds.first();
        continue;
      }
    }

    // Find the one that extends the farthest forward
    RemoteEditLog bestLog = Collections.max(logGroup);
    logs.add(bestLog);
    // And then start looking from after that point
    curStartTxId = bestLog.getEndTxId() + 1;
  }
  RemoteEditLogManifest ret = new RemoteEditLogManifest(logs);
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated manifest for logs since " + fromTxId + ":"
        + ret);      
  }
  return ret;
}
 
Contributor: naver, Project: hadoop, Lines: 65, Source: JournalSet.java
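One property worth noting in this example: because each iteration advances curStartTxId to bestLog.getEndTxId() + 1, and any gap discards the logs accumulated so far, the returned manifest is always a contiguous run of segments. Below is a hedged sketch of the kind of check a caller could apply to verify that property; it is illustrative only and not part of JournalSet.

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

class ManifestContiguityCheck {
  // Verifies that each segment starts right after the previous one ends.
  static void assertContiguous(RemoteEditLogManifest manifest) {
    RemoteEditLog prev = null;
    for (RemoteEditLog log : manifest.getLogs()) {
      if (prev != null && log.getStartTxId() != prev.getEndTxId() + 1) {
        throw new IllegalStateException("Gap between txid " + prev.getEndTxId()
            + " and txid " + log.getStartTxId());
      }
      prev = log;
    }
  }
}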

Example 6: getEditLogManifest

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
/**
 * Return a manifest of what edit logs are available. All available
 * edit logs are returned starting from the transaction id passed,
 * including inprogress segments.
 * 
 * @param fromTxId Starting transaction id to read the logs.
 * @return RemoteEditLogManifest object.
 */
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) {
  // Collect RemoteEditLogs available from each FileJournalManager
  List<RemoteEditLog> allLogs = new ArrayList<RemoteEditLog>();
  for (JournalAndStream j : journals) {
    JournalManager jm = j.getManager();
    try {
      allLogs.addAll(jm.getEditLogManifest(fromTxId).getLogs());
    } catch (Throwable t) {
      LOG.warn("Cannot list edit logs in " + jm, t);
    }
  }

  // Group logs by their starting txid
  ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
    Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
  long curStartTxId = fromTxId;

  List<RemoteEditLog> logs = new ArrayList<RemoteEditLog>();
  while (true) {
    ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
    if (logGroup.isEmpty()) {
      // we have a gap in logs - for example because we recovered some old
      // storage directory with ancient logs. Clear out any logs we've
      // accumulated so far, and then skip to the next segment of logs
      // after the gap.
      SortedSet<Long> startTxIds = new TreeSet<Long>(logsByStartTxId.keySet());
      startTxIds = startTxIds.tailSet(curStartTxId);
      if (startTxIds.isEmpty()) {
        break;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found gap in logs at " + curStartTxId + ": " +
              "not returning previous logs in manifest.");
        }
        logs.clear();
        curStartTxId = startTxIds.first();
        continue;
      }
    }

    // Find the one that extends the farthest forward
    RemoteEditLog bestLog = Collections.max(logGroup);
    logs.add(bestLog);
    // And then start looking from after that point
    curStartTxId = bestLog.getEndTxId() + 1;
    if (curStartTxId == 0)
      break;
  }
  RemoteEditLogManifest ret = new RemoteEditLogManifest(logs);
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated manifest for logs since " + fromTxId + ":"
        + ret);      
  }
  return ret;
}
 
Contributor: rhli, Project: hadoop-EAR, Lines: 65, Source: JournalSet.java

Example 7: getEditLogManifest

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
/**
 * Return a manifest of what finalized edit logs are available. All available
 * edit logs are returned starting from the transaction id passed.
 * 
 * @param fromTxId Starting transaction id to read the logs.
 * @return RemoteEditLogManifest object.
 */
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId,
    boolean forReading) {
  // Collect RemoteEditLogs available from each FileJournalManager
  List<RemoteEditLog> allLogs = Lists.newArrayList();
  for (JournalAndStream j : journals) {
    if (j.getManager() instanceof FileJournalManager) {
      FileJournalManager fjm = (FileJournalManager)j.getManager();
      try {
        allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading));
      } catch (Throwable t) {
        LOG.warn("Cannot list edit logs in " + fjm, t);
      }
    }
  }
  
  // Group logs by their starting txid
  ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
    Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
  long curStartTxId = fromTxId;

  List<RemoteEditLog> logs = Lists.newArrayList();
  while (true) {
    ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
    if (logGroup.isEmpty()) {
      // we have a gap in logs - for example because we recovered some old
      // storage directory with ancient logs. Clear out any logs we've
      // accumulated so far, and then skip to the next segment of logs
      // after the gap.
      SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
      startTxIds = startTxIds.tailSet(curStartTxId);
      if (startTxIds.isEmpty()) {
        break;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found gap in logs at " + curStartTxId + ": " +
              "not returning previous logs in manifest.");
        }
        logs.clear();
        curStartTxId = startTxIds.first();
        continue;
      }
    }

    // Find the one that extends the farthest forward
    RemoteEditLog bestLog = Collections.max(logGroup);
    logs.add(bestLog);
    // And then start looking from after that point
    curStartTxId = bestLog.getEndTxId() + 1;
  }
  RemoteEditLogManifest ret = new RemoteEditLogManifest(logs);
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated manifest for logs since " + fromTxId + ":"
        + ret);      
  }
  return ret;
}
 
Contributor: ict-carch, Project: hadoop-plus, Lines: 65, Source: JournalSet.java

Example 8: getEditLogManifest

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; // import the package/class this method depends on
/**
 * Return a manifest of what finalized edit logs are available. All available
 * edit logs are returned starting from the transaction id passed.
 * 
 * @param fromTxId Starting transaction id to read the logs.
 * @return RemoteEditLogManifest object.
 */
public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId,
    boolean forReading) {
  // Collect RemoteEditLogs available from each FileJournalManager
  List<RemoteEditLog> allLogs = Lists.newArrayList();
  for (JournalAndStream j : journals) {
    if (j.getManager() instanceof FileJournalManager) {
      FileJournalManager fjm = (FileJournalManager)j.getManager();
      try {
        allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading, false));
      } catch (Throwable t) {
        LOG.warn("Cannot list edit logs in " + fjm, t);
      }
    }
  }
  
  // Group logs by their starting txid
  ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
    Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
  long curStartTxId = fromTxId;

  List<RemoteEditLog> logs = Lists.newArrayList();
  while (true) {
    ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
    if (logGroup.isEmpty()) {
      // we have a gap in logs - for example because we recovered some old
      // storage directory with ancient logs. Clear out any logs we've
      // accumulated so far, and then skip to the next segment of logs
      // after the gap.
      SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
      startTxIds = startTxIds.tailSet(curStartTxId);
      if (startTxIds.isEmpty()) {
        break;
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Found gap in logs at " + curStartTxId + ": " +
              "not returning previous logs in manifest.");
        }
        logs.clear();
        curStartTxId = startTxIds.first();
        continue;
      }
    }

    // Find the one that extends the farthest forward
    RemoteEditLog bestLog = Collections.max(logGroup);
    logs.add(bestLog);
    // And then start looking from after that point
    curStartTxId = bestLog.getEndTxId() + 1;
  }
  RemoteEditLogManifest ret = new RemoteEditLogManifest(logs);
  
  if (LOG.isDebugEnabled()) {
    LOG.debug("Generated manifest for logs since " + fromTxId + ":"
        + ret);      
  }
  return ret;
}
 
Contributor: chendave, Project: hadoop-TCP, Lines: 65, Source: JournalSet.java


Note: The org.apache.hadoop.hdfs.server.protocol.RemoteEditLog.getEndTxId examples in this article were compiled from open-source code hosted on platforms such as GitHub and MSDocs. The snippets are taken from community-contributed open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's license. Do not republish without permission.