当前位置: 首页>>代码示例>>Java>>正文


Java HdfsConstants.INVALID_TXID属性代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.common.HdfsConstants.INVALID_TXID属性的典型用法代码示例。如果您正苦于以下问题：Java HdfsConstants.INVALID_TXID属性的具体用法？Java HdfsConstants.INVALID_TXID怎么用？Java HdfsConstants.INVALID_TXID使用的例子？那么恭喜您，这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在org.apache.hadoop.hdfs.server.common.HdfsConstants的用法示例。


在下文中一共展示了HdfsConstants.INVALID_TXID属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: validateEditLog

/**
 * Validates the edit log stored in the given BookKeeper ledger.
 *
 * @param ledgerProvider source of ledger handles to read from
 * @param ledgerMetadata metadata describing the ledger to validate
 * @return the validation result; if the ledger header is corrupt, a
 *         result with zero transactions, INVALID_TXID endpoints, and the
 *         corruption flag set
 * @throws IOException if validation of a readable ledger fails
 */
public static FSEditLogLoader.EditLogValidation validateEditLog(
    LedgerHandleProvider ledgerProvider,
    EditLogLedgerMetadata ledgerMetadata) throws IOException {
  BookKeeperEditLogInputStream in;
  try {
    // lastTxId == -1 marks an in-progress (still being written) ledger.
    in = new BookKeeperEditLogInputStream(ledgerProvider,
        ledgerMetadata.getLedgerId(), 0, ledgerMetadata.getFirstTxId(),
        ledgerMetadata.getLastTxId(), ledgerMetadata.getLastTxId() == -1);
  } catch (LedgerHeaderCorruptException e) {
    // FIX: added the missing space so the ledger id is not glued to the
    // preceding text in the log message.
    LOG.warn("Log at ledger id " + ledgerMetadata.getLedgerId() +
        " has no valid header", e);
    // A corrupt header is treated as a log with no transactions:
    // unknown start/end txids, flagged as corrupt.
    return new FSEditLogLoader.EditLogValidation(0,
        HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, true);
  }

  try {
    return FSEditLogLoader.validateEditLog(in);
  } finally {
    // Always release the underlying stream, even if validation throws.
    IOUtils.closeStream(in);
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:21,代码来源:BookKeeperEditLogInputStream.java

示例2: validateAndGetEndTxId

/**
 * Validates the given ledger and returns the txid of its last transaction.
 *
 * @param ledger metadata of the ledger to validate
 * @param fence  if true, validate through a fencing provider that blocks
 *               further writes to the ledger
 * @return the end txid of the ledger, or INVALID_TXID if the ledger holds
 *         no transactions (i.e. it is corrupt or empty)
 * @throws IOException if validation fails
 */
long validateAndGetEndTxId(EditLogLedgerMetadata ledger, boolean fence)
    throws IOException {
  // Choose the handle provider up front: fencing when requested,
  // otherwise this journal manager itself.
  LedgerHandleProvider provider =
      fence ? new FencingLedgerHandleProvider() : this;
  FSEditLogLoader.EditLogValidation val =
      BookKeeperEditLogInputStream.validateEditLog(provider, ledger);
  // Test hook for fault-injection during segment validation.
  InjectionHandler.processEvent(InjectionEvent.BKJM_VALIDATELOGSEGMENT,
      val);
  if (val.getNumTransactions() == 0) {
    return HdfsConstants.INVALID_TXID; // Ledger is corrupt
  }
  return val.getEndTxId();
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:16,代码来源:BookKeeperJournalManager.java

示例3: getJournalInputStreamDontCheckLastTxId

/**
 * Obtains an edit log input stream from the journal manager for the given
 * start txid, without verifying the stream's last txid.
 *
 * @param jm   journal manager to query
 * @param txId first txid of the desired segment, or INVALID_TXID to take
 *             whichever segment sorts first
 * @return the matching stream
 * @throws IOException if no stream is available for the requested txid
 */
static EditLogInputStream getJournalInputStreamDontCheckLastTxId(
    JournalManager jm, long txId) throws IOException {
  List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
  jm.selectInputStreams(streams, txId, true, false);
  if (streams.isEmpty()) {
    throw new IOException("Cannot obtain stream for txid: " + txId);
  }
  Collections.sort(streams, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);

  // An invalid txid means "just return the earliest available stream".
  if (txId == HdfsConstants.INVALID_TXID) {
    return streams.get(0);
  }

  // Otherwise require an exact match on the segment's first txid.
  for (EditLogInputStream candidate : streams) {
    if (candidate.getFirstTxId() == txId) {
      return candidate;
    }
  }
  throw new IOException("Cannot obtain stream for txid: " + txId);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:20,代码来源:TestBookKeeperJournalManager.java

示例4: getSegmentInfo

/**
 * @return the current state of the given segment, or null if the
 * segment does not exist or contains no transactions.
 */
private SegmentStateProto getSegmentInfo(long segmentTxId)
    throws IOException {
  EditLogFile logFile = fjm.getLogFile(segmentTxId);
  if (logFile == null) {
    // No segment starts at this txid.
    return null;
  }
  // An in-progress segment must be scanned to discover its real end txid.
  if (logFile.isInProgress()) {
    logFile.validateLog();
  }
  if (logFile.getLastTxId() == HdfsConstants.INVALID_TXID) {
    // The segment holds no transactions; archive it and report "missing".
    LOG.info("Edit log file " + logFile + " appears to be empty. " +
        "Moving it aside...");
    logFile.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto segmentState = new SegmentStateProto(
      segmentTxId, logFile.getLastTxId(), logFile.isInProgress());
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + logFile + " -> "
      + segmentState);
  return segmentState;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:23,代码来源:Journal.java

示例5: scanStorageForLatestEdits

/**
 * Scan the local storage directory, and return the segment containing
 * the highest transaction.
 * @return the EditLogFile with the highest transactions, or null
 * if no files exist.
 */
private synchronized EditLogFile scanStorageForLatestEdits() throws IOException {
  if (!fjm.getStorageDirectory().getCurrentDir().exists()) {
    return null;
  }

  LOG.info("Scanning storage " + fjm);
  List<EditLogFile> logFiles = fjm.getLogFiles(0);

  // Walk the candidates newest-first, setting aside empty segments
  // until we find one that actually contains transactions.
  for (int i = logFiles.size() - 1; i >= 0; i--) {
    EditLogFile newest = logFiles.get(i);
    newest.validateLog();
    LOG.info("Latest log is " + newest);
    if (newest.getLastTxId() != HdfsConstants.INVALID_TXID) {
      return newest;
    }
    // The log contains no transactions; move it aside and keep looking.
    LOG.warn("Latest log " + newest + " has no transactions. " +
        "moving it aside and looking for previous log");
    newest.moveAsideEmptyFile();
  }

  LOG.info("No files in " + fjm);
  return null;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:31,代码来源:Journal.java

示例6: validateEditLog

/**
 * Validates the edit log in the given file.
 *
 * @param file the edit log file to validate
 * @return the validation result; if the file's header is corrupt, a
 *         result with zero transactions, INVALID_TXID endpoints, and the
 *         corruption flag set
 * @throws IOException if validation of a readable log fails
 */
static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOException {
  EditLogFileInputStream in;
  try {
    in = new EditLogFileInputStream(file);
    // Force the header to be read so a corrupt header surfaces here.
    in.getVersion();
  } catch (LogHeaderCorruptException corrupt) {
    // If it's missing its header, this is equivalent to no transactions
    FSImage.LOG.warn("Log at " + file + " has no valid header",
        corrupt);
    return new FSEditLogLoader.EditLogValidation(
        0, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, true);
  }

  try {
    return FSEditLogLoader.validateEditLog(in);
  } finally {
    // Close the stream even if validation throws.
    IOUtils.closeStream(in);
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:19,代码来源:EditLogFileInputStream.java

示例7: validateEditLog

/**
 * Validates the edit log in the given file.
 *
 * @param file the edit log file to validate
 * @return the validation result; if the file's header is corrupt, a
 *         result with zero transactions and INVALID_TXID endpoints
 * @throws IOException if validation of a readable log fails
 */
static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOException {
  EditLogFileInputStream in;
  try {
    in = new EditLogFileInputStream(file);
  } catch (LogHeaderCorruptException corrupt) {
    // If it's missing its header, this is equivalent to no transactions
    FSImage.LOG.warn("Log at " + file + " has no valid header",
        corrupt);
    return new FSEditLogLoader.EditLogValidation(
        0, HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID);
  }

  try {
    return FSEditLogLoader.validateEditLog(in);
  } finally {
    // Close the stream even if validation throws.
    IOUtils.closeStream(in);
  }
}
 
开发者ID:iVCE,项目名称:RDFS,代码行数:18,代码来源:EditLogFileInputStream.java

示例8: hasMissingValidSegments

/**
 * Check if the recovery is needed. If we have all segments within this
 * range, then there is no need to do anything.
 *
 * @return true if any part of [recoveryStartTxid, recoveryEndTxid] is not
 *         covered by contiguous valid segments
 * @throws IOException if the recovery range has not been set via setRange()
 */
boolean hasMissingValidSegments() throws IOException {
  // The range must have been initialized with setRange() first.
  if (recoveryStartTxid == HdfsConstants.INVALID_TXID
      || recoveryEndTxid == HdfsConstants.INVALID_TXID) {
    throwIOException(logMsg + " task range is not set!");
  }
  // No valid segments at all means everything is missing.
  if (validSegments.isEmpty()) {
    return true;
  }
  int lastIdx = validSegments.size() - 1;
  // The segments must start and end exactly at the recovery boundaries.
  boolean coversStart =
      validSegments.get(0).getFirstTxId() == recoveryStartTxid;
  boolean coversEnd =
      validSegments.get(lastIdx).getLastTxId() == recoveryEndTxid;
  if (!coversStart || !coversEnd) {
    return true;
  }
  // Adjacent segments must abut: next.first == current.last + 1.
  for (int i = 0; i < lastIdx; i++) {
    long expectedNextFirst = validSegments.get(i).getLastTxId() + 1;
    if (validSegments.get(i + 1).getFirstTxId() != expectedNextFirst) {
      return true;
    }
  }
  // Valid segments cover the entire range of transactions.
  return false;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:30,代码来源:JournalNodeJournalSyncer.java

示例9: getNumTransactions

/**
 * Number of transactions spanned by the inclusive range
 * [startTxId, endTxId], or 0 when either endpoint is INVALID_TXID.
 */
public long getNumTransactions() {
  boolean rangeUnknown = startTxId == HdfsConstants.INVALID_TXID
      || endTxId == HdfsConstants.INVALID_TXID;
  return rangeUnknown ? 0 : endTxId - startTxId + 1;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:7,代码来源:FSEditLogLoader.java

示例10: reset

/**
 * Clears the buffer and resets the per-buffer transaction bookkeeping.
 *
 * @return this buffer, for call chaining
 */
@Override
public DataOutputBuffer reset() {
  super.reset();
  numTxns = 0;
  // No transactions buffered yet, so the first txid is unknown.
  firstTxId = HdfsConstants.INVALID_TXID;
  return this;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:7,代码来源:EditsDoubleBuffer.java

示例11: getLatestImage

/**
 * Returns the latest fsimage file to load, after verifying that the
 * selected image and edits storage directories come from the same
 * checkpoint.
 *
 * @return descriptor of the latest image file (txid unknown in
 *         pre-transactional layout, hence INVALID_TXID)
 * @throws IOException if no image/edits directory was found, or if the
 *         image and edits checkpoint times are inconsistent
 */
@Override
FSImageFile getLatestImage() throws IOException {
  // We should have at least one image and one edits dirs
  if (latestNameSD == null)
    throw new IOException("Image file is not found in " + imageDirs);
  if (latestEditsSD == null)
    throw new IOException("Edits file is not found in " + editsDirs);

  // Make sure we are loading image and edits from same checkpoint
  if (latestNameCheckpointTime > latestEditsCheckpointTime
      && latestNameSD != latestEditsSD
      && latestNameSD.getStorageDirType() == NameNodeDirType.IMAGE
      && latestEditsSD.getStorageDirType() == NameNodeDirType.EDITS) {
    // This is a rare failure when NN has image-only and edits-only
    // storage directories, and fails right after saving images,
    // in some of the storage directories, but before purging edits.
    // See -NOTE- in saveNamespace().
    LOG.error("This is a rare failure scenario!!!");
    LOG.error("Image checkpoint time " + latestNameCheckpointTime +
              " > edits checkpoint time " + latestEditsCheckpointTime);
    LOG.error("Name-node will treat the image as the latest state of " +
              "the namespace. Old edits will be discarded.");
  } else if (latestNameCheckpointTime != latestEditsCheckpointTime) {
    // FIX: the original message ran the two timestamps together with no
    // separator ("...time = 12edits checkpoint time = 34"); add "; ".
    throw new IOException("Inconsistent storage detected, " +
                    "image and edits checkpoint times do not match. " +
                    "image checkpoint time = " + latestNameCheckpointTime +
                    "; edits checkpoint time = " + latestEditsCheckpointTime);
  }

  needToSaveAfterRecovery = doRecovery();

  return new FSImageFile(latestNameSD,
      NNStorage.getStorageFile(latestNameSD, NameNodeFile.IMAGE),
      HdfsConstants.INVALID_TXID, null);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:35,代码来源:FSImagePreTransactionalStorageInspector.java

示例12: setRange

/**
 * Defines the transaction range this recovery task must cover.
 *
 * @param recoveryStartTxid first txid of the range (must be valid and
 *                          strictly less than the end)
 * @param recoveryEndTxid   last txid of the range (must be valid)
 * @throws IOException if the range is empty, inverted, or uses invalid txids
 */
void setRange(long recoveryStartTxid, long recoveryEndTxid)
    throws IOException {
  // Both endpoints must be valid txids and the range must be non-empty.
  boolean startInvalid = recoveryStartTxid <= HdfsConstants.INVALID_TXID;
  boolean endInvalid = recoveryEndTxid <= HdfsConstants.INVALID_TXID;
  boolean emptyOrInverted = recoveryStartTxid >= recoveryEndTxid;
  if (emptyOrInverted || startInvalid || endInvalid) {
    throwIOException(logMsg + "Illegal start/end transactions: "
        + recoveryStartTxid + " : " + recoveryEndTxid);
  }
  this.recoveryStartTxid = recoveryStartTxid;
  this.recoveryEndTxid = recoveryEndTxid;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:12,代码来源:JournalNodeJournalSyncer.java

示例13: resetTxIds

/**
 * Resets the edit log's transaction counters to the given txid and
 * returns to the between-segments state.
 *
 * @param txid the txid to reset both the write and sync counters to
 * @throws IOException if the journals fail to update their committed id
 */
public void resetTxIds(long txid) throws IOException {
  this.txid = txid;
  this.synctxid = txid;
  this.state = State.BETWEEN_LOG_SEGMENTS;
  // No segment is open, so there is no current-segment start txid.
  this.curSegmentTxId = HdfsConstants.INVALID_TXID;

  // Journals need to reset their committed IDs.
  journalSet.setCommittedTxId(txid, true);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:9,代码来源:FSEditLog.java

示例14: RedundantEditLogInputStream

/**
 * Builds a stream that reads the same log segment from several redundant
 * copies, falling back between them on failure.
 *
 * @param streams      redundant streams, all starting at the same txid
 * @param startTxId    first txid the caller wants, or INVALID_TXID
 * @param inProgressOk whether in-progress (unfinalized) segments are allowed
 */
public RedundantEditLogInputStream(Collection<EditLogInputStream> streams,
    long startTxId, boolean inProgressOk) {
  this.curIdx = 0;
  // Position just before the first transaction we expect to read.
  if (startTxId == HdfsConstants.INVALID_TXID) {
    this.prevTxId = HdfsConstants.INVALID_TXID;
  } else {
    this.prevTxId = startTxId - 1;
  }
  this.state = streams.isEmpty() ? State.EOF : State.SKIP_UNTIL;
  this.prevException = null;
  this.inProgressOk = inProgressOk;

  // EditLogInputStreams in a RedundantEditLogInputStream can't be
  // pre-transactional: every stream must know its first txid.
  EditLogInputStream reference = null;
  for (EditLogInputStream candidate : streams) {
    Preconditions.checkArgument(candidate.getFirstTxId() !=
        HdfsConstants.INVALID_TXID, "invalid first txid in stream: %s",
        candidate);
    if (!inProgressOk) { // extra checks for startup
      Preconditions.checkArgument(
          candidate.getLastTxId() != HdfsConstants.INVALID_TXID,
          "invalid last txid in stream: %s", candidate);
      Preconditions.checkArgument(!candidate.isInProgress(),
          "segment should not be inprogress: %s", candidate);
    }
    if (reference == null) {
      reference = candidate;
    } else {
      // All copies must describe the same segment.
      Preconditions.checkArgument(
          candidate.getFirstTxId() == reference.getFirstTxId(),
        "All streams in the RedundantEditLogInputStream must have the same " +
        "start transaction ID!  " + reference + " had start txId " +
        reference.getFirstTxId() + ", but " + candidate + " had start txId " +
        candidate.getFirstTxId());
    }
  }

  this.streams = streams.toArray(new EditLogInputStream[streams.size()]);

  // We sort the streams here so that the streams that end later come first.
  Arrays.sort(this.streams, segmentComparator);
  LOG.info("Created stream: " + getName());
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:37,代码来源:RedundantEditLogInputStream.java

示例15: addStreamsToCollectionFromFiles

/**
 * Converts edit log files into input streams and adds them to the given
 * collection, skipping files that are in progress (when not allowed),
 * fail validation, or end before the requested txid.
 *
 * @param elfs                        candidate edit log files
 * @param streams                     output collection of input streams
 * @param fromTxId                    earliest txid of interest
 * @param inProgressOk                whether in-progress segments may be used
 * @param validateInProgressSegments  whether in-progress segments are
 *                                    validated before use
 * @throws IOException if constructing an input stream fails
 */
void addStreamsToCollectionFromFiles(Collection<EditLogFile> elfs,
    Collection<EditLogInputStream> streams, long fromTxId,
    boolean inProgressOk, boolean validateInProgressSegments)
    throws IOException {
  for (EditLogFile logFile : elfs) {
    if (logFile.isInProgress()) {
      if (!inProgressOk) {
        LOG.debug("passing over " + logFile + " because it is in progress "
            + "and we are ignoring in-progress logs.");
        continue;
      }
      if (!validateInProgressSegments) {
        LOG.info("Skipping validation of edit segment: " + logFile);
      } else {
        try {
          logFile.validateLog();
        } catch (IOException e) {
          // A segment with a bad header is skipped, not fatal.
          LOG.error("got IOException while trying to validate header of "
              + logFile + ".  Skipping.", e);
          continue;
        }
      }
    }
    // Skip finalized segments that end before the range of interest.
    boolean endsBeforeRange =
        logFile.lastTxId != HdfsConstants.INVALID_TXID
            && logFile.lastTxId < fromTxId;
    if (endsBeforeRange) {
      LOG.info("passing over " + logFile + " because it ends at " + logFile.lastTxId
          + ", but we only care about transactions " + "as new as "
          + fromTxId);
      continue;
    }
    EditLogFileInputStream inputStream = new EditLogFileInputStream(
        logFile.getFile(), logFile.getFirstTxId(), logFile.getLastTxId(),
        logFile.isInProgress());
    inputStream.setJournalManager(this);
    streams.add(inputStream);
  }
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:35,代码来源:FileJournalManager.java


注:本文中的org.apache.hadoop.hdfs.server.common.HdfsConstants.INVALID_TXID属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。