Java JournalSet Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.JournalSet. If you are unsure what JournalSet does or how to use it, the curated class code examples below should help.


The JournalSet class belongs to the org.apache.hadoop.hdfs.server.namenode package. Seven code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Java code examples.
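Before the examples, a minimal sketch of the two JournalSet members these snippets lean on may be helpful: the EDIT_LOG_INPUT_STREAM_COMPARATOR comparator for ordering edit-log streams by their first transaction id, and the static chainAndMakeRedundantStreams helper for merging per-journal streams into redundant chained streams. The wrapper class and method names below are illustrative only (not part of Hadoop); the three-argument chainAndMakeRedundantStreams call matches Examples 3 and 7, while other forks shown later pass extra arguments.

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.PriorityQueue;

import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.hdfs.server.namenode.JournalSet;

// Illustrative wrapper (not part of Hadoop) around the JournalSet members
// exercised by the examples below.
public class JournalSetUsageSketch {

  // Order candidate streams by their first transaction id, as Example 1 does
  // before picking the stream for a requested txid.
  static void sortByStartTxId(List<EditLogInputStream> streams) {
    Collections.sort(streams, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  }

  // Merge per-journal streams into chained, redundant streams starting at
  // fromTxnId, as Examples 3 and 7 do at the end of selectInputStreams().
  static void chainStreams(Collection<EditLogInputStream> out,
      PriorityQueue<EditLogInputStream> allStreams,
      long fromTxnId) throws IOException {
    JournalSet.chainAndMakeRedundantStreams(out, allStreams, fromTxnId);
  }
}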

Example 1: getJournalInputStreamDontCheckLastTxId

import org.apache.hadoop.hdfs.server.namenode.JournalSet; // import the required package/class
static EditLogInputStream getJournalInputStreamDontCheckLastTxId(
    JournalManager jm, long txId) throws IOException {
  // Collect all candidate streams starting at txId, including in-progress segments.
  List<EditLogInputStream> streams = new ArrayList<EditLogInputStream>();
  jm.selectInputStreams(streams, txId, true, false);
  if (streams.size() < 1) {
    throw new IOException("Cannot obtain stream for txid: " + txId);
  }
  // Order the candidates by their first transaction id.
  Collections.sort(streams, JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);

  // When no specific txid is requested, return the earliest stream.
  if (txId == HdfsConstants.INVALID_TXID) {
    return streams.get(0);
  }

  // Otherwise return the stream whose segment starts exactly at txId.
  for (EditLogInputStream elis : streams) {
    if (elis.getFirstTxId() == txId) {
      return elis;
    }
  }
  throw new IOException("Cannot obtain stream for txid: " + txId);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 21, Source: TestBookKeeperJournalManager.java
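
For orientation, a hypothetical call site for the helper above might look like the following. It assumes the method sits in the same test class; the variable jm and the starting txid of 1 are placeholders, not values taken from the original test.

// Hypothetical caller, assumed to live in the same test class as the helper above;
// 'jm' and the starting txid 1 are placeholders, not values from the original test.
static void readSegmentStartingAt(JournalManager jm) throws IOException {
  EditLogInputStream in = getJournalInputStreamDontCheckLastTxId(jm, 1);
  try {
    // consume edit operations from 'in' here
  } finally {
    in.close(); // always release the underlying journal stream
  }
}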

Example 2: testSBNCheckpoints

import org.apache.hadoop.hdfs.server.namenode.JournalSet; // import the required package/class
@Test
public void testSBNCheckpoints() throws Exception {
  JournalSet standbyJournalSet = NameNodeAdapter.spyOnJournalSet(nn1);
  
  doEdits(0, 10);
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  // Once the standby catches up, it should notice that it needs to
  // do a checkpoint and save one to its local directories.
  HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
  
  // It should also upload it back to the active.
  HATestUtil.waitForCheckpoint(cluster, 0, ImmutableList.of(12));
  
  // The standby should never try to purge edit logs on shared storage.
  Mockito.verify(standbyJournalSet, Mockito.never()).
    purgeLogsOlderThan(Mockito.anyLong());
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 18, Source: TestStandbyCheckpoints.java

Example 3: selectInputStreams

import org.apache.hadoop.hdfs.server.namenode.JournalSet; // import the required package/class
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk) throws IOException {

  // Ask every logger for its edit-log manifest and wait for a write quorum of responses.
  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId, inProgressOk);
  Map<AsyncLogger, RemoteEditLogManifest> resps =
      loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");

  LOG.debug("selectInputStream manifests:\n" +
      Joiner.on("\n").withKeyValueSeparator(": ").join(resps));

  // Build one HTTP-backed stream per remote log segment, ordered by start txid.
  final PriorityQueue<EditLogInputStream> allStreams =
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();

    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());

      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
          connectionFactory, url, remoteLog.getStartTxId(),
          remoteLog.getEndTxId(), remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  // Merge overlapping per-journal streams into chained, redundant streams from fromTxnId onward.
  JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
 
Developer: naver, Project: hadoop, Lines of code: 32, Source: QuorumJournalManager.java

Example 4: setupCurrentEditStream

import org.apache.hadoop.hdfs.server.namenode.JournalSet; // import the required package/class
/**
 * Setup the input stream to be consumed by the reader. The input stream
 * corresponds to a single segment.
 */
private void setupCurrentEditStream(long txid) throws IOException {
  // get new stream
  currentEditLogInputStream = JournalSet.getInputStream(remoteJournalManager,
      txid);
  // we just started a new log segment
  currentSegmentTxId = txid;
  // indicate that we successfully reopened the stream
  mostRecentlyReadTransactionTime = now();
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 14, Source: ServerLogReaderTransactional.java

Example 5: selectInputStreams

import org.apache.hadoop.hdfs.server.namenode.JournalSet; // import the required package/class
/**
 * Select input streams.
 * inProgressOk should be true only for tailing, not for startup
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk, boolean validateInProgressSegments)
    throws IOException {

  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId);
  // we insist on getting all responses, even if they are to be exceptions
  // this will fail if we cannot get majority of successes
  Map<AsyncLogger, RemoteEditLogManifest> resps = loggers
      .waitForReadQuorumWithAllResponses(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");
  
  if(LOG.isDebugEnabled()) {
    LOG.debug("selectInputStream manifests:\n" +
        Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  }
  
  final PriorityQueue<EditLogInputStream> allStreams = 
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      EditLogInputStream elis = new URLLogInputStream(logger,
          remoteLog.getStartTxId(), httpConnectReadTimeoutMs);
      if (elis.isInProgress() && !inProgressOk) {
        continue;
      }
      allStreams.add(elis);
    }
  }
  // we pass 0 as min redundancy since we do not care about it here
  JournalSet.chainAndMakeRedundantStreams(
      streams, allStreams, fromTxnId, inProgressOk, 0);
}
 
Developer: rhli, Project: hadoop-EAR, Lines of code: 43, Source: QuorumJournalManager.java

Example 6: selectInputStreams

import org.apache.hadoop.hdfs.server.namenode.JournalSet; // import the required package/class
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk, boolean forReading) throws IOException {

  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId, forReading);
  Map<AsyncLogger, RemoteEditLogManifest> resps =
      loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");
  
  LOG.debug("selectInputStream manifests:\n" +
      Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  
  final PriorityQueue<EditLogInputStream> allStreams = 
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());

      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
          url, remoteLog.getStartTxId(), remoteLog.getEndTxId(),
          remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  JournalSet.chainAndMakeRedundantStreams(
      streams, allStreams, fromTxnId, inProgressOk);
}
 
Developer: ict-carch, Project: hadoop-plus, Lines of code: 33, Source: QuorumJournalManager.java

Example 7: selectInputStreams

import org.apache.hadoop.hdfs.server.namenode.JournalSet; // import the required package/class
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk, boolean forReading) throws IOException {

  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId, forReading, inProgressOk);
  Map<AsyncLogger, RemoteEditLogManifest> resps =
      loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");
  
  LOG.debug("selectInputStream manifests:\n" +
      Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  
  final PriorityQueue<EditLogInputStream> allStreams = 
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());

      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
          url, remoteLog.getStartTxId(), remoteLog.getEndTxId(),
          remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
 
Developer: chendave, Project: hadoop-TCP, Lines of code: 32, Source: QuorumJournalManager.java


Note: The org.apache.hadoop.hdfs.server.namenode.JournalSet class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; refer to each project's license before distributing or reusing the code. Do not reproduce this article without permission.