本文整理汇总了Java中org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest.getLogs方法的典型用法代码示例。如果您正苦于以下问题:Java RemoteEditLogManifest.getLogs方法的具体用法?Java RemoteEditLogManifest.getLogs怎么用?Java RemoteEditLogManifest.getLogs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest
的用法示例。
在下文中一共展示了RemoteEditLogManifest.getLogs方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: rollForwardByApplyingLogs
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Rolls the destination image forward by loading every finalized edit
 * segment from {@code manifest} that extends past the transactions the
 * image has already applied.
 *
 * @param manifest     list of remote edit log segments to apply
 * @param dstImage     image to roll forward
 * @param dstNamesystem namesystem the edits are replayed into
 * @throws IOException if an edits file cannot be located or loaded
 */
static void rollForwardByApplyingLogs(
    RemoteEditLogManifest manifest,
    FSImage dstImage,
    FSNamesystem dstNamesystem) throws IOException {
  NNStorage storage = dstImage.getStorage();
  List<EditLogInputStream> inputStreams = Lists.newArrayList();
  for (RemoteEditLog segment : manifest.getLogs()) {
    if (segment.getEndTxId() <= dstImage.getLastAppliedTxId()) {
      // Entire segment is already reflected in the image; skip it.
      continue;
    }
    File editsFile = storage.findFinalizedEditsFile(
        segment.getStartTxId(), segment.getEndTxId());
    inputStreams.add(new EditLogFileInputStream(editsFile,
        segment.getStartTxId(), segment.getEndTxId(), true));
  }
  LOG.info("Checkpointer about to load edits from " +
      inputStreams.size() + " stream(s).");
  dstImage.loadEdits(inputStreams, dstNamesystem);
}
示例2: rollForwardByApplyingLogs
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Rolls the destination image forward by applying the finalized edit log
 * segments from {@code manifest} whose transactions have not yet been
 * applied.
 *
 * @param manifest list of remote edit log segments, in order
 * @param dstImage image to roll forward
 * @throws IOException if an edits file cannot be located or loaded
 */
static void rollForwardByApplyingLogs(
    RemoteEditLogManifest manifest,
    FSImage dstImage) throws IOException {
  NNStorage dstStorage = dstImage.storage;
  List<EditLogInputStream> editsStreams = new ArrayList<EditLogInputStream>();
  for (RemoteEditLog log : manifest.getLogs()) {
    // An in-progress segment (and anything after it) cannot be applied.
    if (log.inProgress()) {
      break;
    }
    if (log.getStartTxId() > dstImage.getLastAppliedTxId()) {
      // Resolve the on-disk edits file only for segments we actually need:
      // files for already-applied segments may have been purged, and looking
      // them up unconditionally could fail spuriously — confirm
      // findFinalizedEditsFile's behavior for missing files.
      File f = dstStorage.findFinalizedEditsFile(
          log.getStartTxId(), log.getEndTxId());
      editsStreams.add(new EditLogFileInputStream(f, log.getStartTxId(),
          log.getEndTxId(), false));
    }
  }
  dstImage.loadEdits(editsStreams);
}
示例3: rollForwardByApplyingLogs
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Rolls the destination image forward: builds an input stream for each
 * finalized edit segment in {@code manifest} that ends after the image's
 * last applied transaction, then loads them all.
 *
 * @param manifest      list of remote edit log segments to apply
 * @param dstImage      image to roll forward
 * @param dstNamesystem namesystem the edits are replayed into
 * @throws IOException if an edits file cannot be located or loaded
 */
static void rollForwardByApplyingLogs(
    RemoteEditLogManifest manifest,
    FSImage dstImage,
    FSNamesystem dstNamesystem) throws IOException {
  NNStorage storage = dstImage.getStorage();
  long lastAppliedTxId = dstImage.getLastAppliedTxId();
  List<EditLogInputStream> inputStreams = Lists.newArrayList();
  for (RemoteEditLog segment : manifest.getLogs()) {
    // Skip segments fully covered by what the image already contains.
    if (segment.getEndTxId() <= lastAppliedTxId) {
      continue;
    }
    long startTxId = segment.getStartTxId();
    long endTxId = segment.getEndTxId();
    File editsFile = storage.findFinalizedEditsFile(startTxId, endTxId);
    inputStreams.add(
        new EditLogFileInputStream(editsFile, startTxId, endTxId, true));
  }
  LOG.info("Checkpointer about to load edits from " +
      inputStreams.size() + " stream(s).");
  dstImage.loadEdits(inputStreams, dstNamesystem, null);
}
示例4: convert
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Converts a {@link RemoteEditLogManifest} into its protobuf
 * representation by translating each contained segment descriptor.
 *
 * @param manifest the manifest to convert
 * @return the equivalent protobuf message
 */
public static RemoteEditLogManifestProto convert(
    RemoteEditLogManifest manifest) {
  RemoteEditLogManifestProto.Builder protoBuilder =
      RemoteEditLogManifestProto.newBuilder();
  List<RemoteEditLog> segments = manifest.getLogs();
  for (int i = 0; i < segments.size(); i++) {
    protoBuilder.addLogs(convert(segments.get(i)));
  }
  return protoBuilder.build();
}
示例5: selectInputStreams
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Selects edit log input streams starting at {@code fromTxnId} from a
 * write quorum of journal loggers and chains redundant copies of the same
 * segments together into {@code streams}.
 *
 * @param streams      output collection the chained streams are added to
 * @param fromTxnId    first transaction id the caller needs
 * @param inProgressOk whether in-progress segments may be included
 * @throws IOException if a quorum of manifests cannot be obtained
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk) throws IOException {
  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId, inProgressOk);
  Map<AsyncLogger, RemoteEditLogManifest> resps =
      loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");
  // Guard the debug message: joining all manifests into one string is
  // relatively expensive and should only happen when debug is enabled
  // (matches the guarded style used elsewhere in this file).
  if (LOG.isDebugEnabled()) {
    LOG.debug("selectInputStream manifests:\n" +
        Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  }
  // Order streams with the journal-set comparator so that redundant copies
  // of the same segment can be chained together below.
  final PriorityQueue<EditLogInputStream> allStreams =
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());
      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
          connectionFactory, url, remoteLog.getStartTxId(),
          remoteLog.getEndTxId(), remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
示例6: testConvertRemoteEditLogManifest
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
@Test
public void testConvertRemoteEditLogManifest() {
  // Round-trip a two-segment manifest through its protobuf form and verify
  // that every segment survives unchanged.
  List<RemoteEditLog> original = new ArrayList<RemoteEditLog>();
  original.add(new RemoteEditLog(1, 10));
  original.add(new RemoteEditLog(11, 20));
  RemoteEditLogManifestProto proto =
      PBHelper.convert(new RemoteEditLogManifest(original));
  List<RemoteEditLog> roundTripped = PBHelper.convert(proto).getLogs();
  assertEquals(original.size(), roundTripped.size());
  for (int i = 0; i < original.size(); i++) {
    compare(original.get(i), roundTripped.get(i));
  }
}
示例7: getManifest
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Get all available log segments present in the underlying storage directory.
 * This function will never return null, or empty list of segments - it will
 * throw exception in this case.
 *
 * @return non-empty list of remote edit log segments
 * @throws IOException if the manifest is unavailable or contains no segments
 */
List<RemoteEditLog> getManifest() throws IOException {
  // -1 presumably requests all available segments — confirm against
  // the remote journal manager's getEditLogManifest contract.
  RemoteEditLogManifest rm = remoteJournalManager.getEditLogManifest(-1);
  if (rm == null || rm.getLogs().isEmpty()) {
    throw new IOException("Cannot obtain the list of log segments");
  }
  return rm.getLogs();
}
示例8: selectInputStreams
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Select input streams.
 * inProgressOk should be true only for tailing, not for startup.
 * Gathers edit-segment manifests from ALL loggers (requiring a read quorum
 * of successes) and chains redundant copies of each segment into streams.
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
long fromTxnId, boolean inProgressOk, boolean validateInProgressSegments)
throws IOException {
// NOTE(review): validateInProgressSegments is not referenced in this body —
// confirm whether it is intentionally unused here.
QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
loggers.getEditLogManifest(fromTxnId);
// we insist on getting all responses, even if they are to be exceptions
// this will fail if we cannot get majority of successes
Map<AsyncLogger, RemoteEditLogManifest> resps = loggers
.waitForReadQuorumWithAllResponses(q, selectInputStreamsTimeoutMs,
"selectInputStreams");
// Joining all manifests into one string is costly, so only build the
// message when debug logging is actually enabled.
if(LOG.isDebugEnabled()) {
LOG.debug("selectInputStream manifests:\n" +
Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
}
// Order streams with the journal-set comparator so redundant copies of the
// same segment can be chained together below.
final PriorityQueue<EditLogInputStream> allStreams =
new PriorityQueue<EditLogInputStream>(64,
JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
AsyncLogger logger = e.getKey();
RemoteEditLogManifest manifest = e.getValue();
for (RemoteEditLog remoteLog : manifest.getLogs()) {
// Presumably reads the segment from the owning logger over HTTP,
// given the URL-based stream type and connect/read timeout — confirm.
EditLogInputStream elis = new URLLogInputStream(logger,
remoteLog.getStartTxId(), httpConnectReadTimeoutMs);
// Skip in-progress segments unless the caller explicitly allows them.
if (elis.isInProgress() && !inProgressOk) {
continue;
}
allStreams.add(elis);
}
}
// we pass 0 as min redundance as we do not care about this here
JournalSet.chainAndMakeRedundantStreams(
streams, allStreams, fromTxnId, inProgressOk, 0);
}
示例9: selectInputStreams
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Selects edit log input streams starting at {@code fromTxnId} from a
 * write quorum of journal loggers and chains redundant copies of the same
 * segments together into {@code streams}.
 *
 * @param streams      output collection the chained streams are added to
 * @param fromTxnId    first transaction id the caller needs
 * @param inProgressOk whether in-progress segments may be included
 * @param forReading   passed through to the manifest request — confirm its
 *                     exact semantics against AsyncLoggerSet
 * @throws IOException if a quorum of manifests cannot be obtained
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk, boolean forReading) throws IOException {
  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId, forReading);
  Map<AsyncLogger, RemoteEditLogManifest> resps =
      loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");
  // Guard the debug message: joining all manifests into one string is
  // relatively expensive and should only happen when debug is enabled
  // (matches the guarded style used elsewhere in this file).
  if (LOG.isDebugEnabled()) {
    LOG.debug("selectInputStream manifests:\n" +
        Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  }
  // Order streams with the journal-set comparator so that redundant copies
  // of the same segment can be chained together below.
  final PriorityQueue<EditLogInputStream> allStreams =
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());
      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
          url, remoteLog.getStartTxId(), remoteLog.getEndTxId(),
          remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  JournalSet.chainAndMakeRedundantStreams(
      streams, allStreams, fromTxnId, inProgressOk);
}
示例10: selectInputStreams
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Selects edit log input streams starting at {@code fromTxnId} from a
 * write quorum of journal loggers and chains redundant copies of the same
 * segments together into {@code streams}.
 *
 * @param streams      output collection the chained streams are added to
 * @param fromTxnId    first transaction id the caller needs
 * @param inProgressOk whether in-progress segments may be requested
 * @param forReading   passed through to the manifest request — confirm its
 *                     exact semantics against AsyncLoggerSet
 * @throws IOException if a quorum of manifests cannot be obtained
 */
@Override
public void selectInputStreams(Collection<EditLogInputStream> streams,
    long fromTxnId, boolean inProgressOk, boolean forReading) throws IOException {
  QuorumCall<AsyncLogger, RemoteEditLogManifest> q =
      loggers.getEditLogManifest(fromTxnId, forReading, inProgressOk);
  Map<AsyncLogger, RemoteEditLogManifest> resps =
      loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs,
          "selectInputStreams");
  // Guard the debug message: joining all manifests into one string is
  // relatively expensive and should only happen when debug is enabled
  // (matches the guarded style used elsewhere in this file).
  if (LOG.isDebugEnabled()) {
    LOG.debug("selectInputStream manifests:\n" +
        Joiner.on("\n").withKeyValueSeparator(": ").join(resps));
  }
  // Order streams with the journal-set comparator so that redundant copies
  // of the same segment can be chained together below.
  final PriorityQueue<EditLogInputStream> allStreams =
      new PriorityQueue<EditLogInputStream>(64,
          JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
  for (Map.Entry<AsyncLogger, RemoteEditLogManifest> e : resps.entrySet()) {
    AsyncLogger logger = e.getKey();
    RemoteEditLogManifest manifest = e.getValue();
    for (RemoteEditLog remoteLog : manifest.getLogs()) {
      URL url = logger.buildURLToFetchLogs(remoteLog.getStartTxId());
      EditLogInputStream elis = EditLogFileInputStream.fromUrl(
          url, remoteLog.getStartTxId(), remoteLog.getEndTxId(),
          remoteLog.isInProgress());
      allStreams.add(elis);
    }
  }
  JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId);
}
示例11: downloadCheckpointFiles
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; //导入方法依赖的package包/类
/**
 * Download <code>fsimage</code> and <code>edits</code>
 * files from the name-node.
 * Validates the manifest, downloads a new image only when the NN's most
 * recent checkpoint differs from the local one, then downloads every
 * finalized edits segment listed in the manifest.
 * @return true if a new image has been downloaded and needs to be loaded
 * @throws IOException
 */
static boolean downloadCheckpointFiles(final String nnHostPort,
final FSImage dstImage, final CheckpointSignature sig,
final RemoteEditLogManifest manifest) throws IOException {
// Sanity check manifest - these could happen if, eg, someone on the
// NN side accidentally rmed the storage directories
if (manifest.getLogs().isEmpty()) {
throw new IOException("Found no edit logs to download on NN since txid "
+ sig.mostRecentCheckpointTxId);
}
// The first manifest segment must start exactly one txid after the most
// recent checkpoint, otherwise there is a gap we cannot bridge.
long expectedTxId = sig.mostRecentCheckpointTxId + 1;
if (manifest.getLogs().get(0).getStartTxId() != expectedTxId) {
throw new IOException("Bad edit log manifest (expected txid = "
+ expectedTxId + ": " + manifest);
}
dstImage.storage.cTime = sig.cTime;
// get fsimage
boolean downloadImage = true;
// Skip the image download when we already hold the same checkpoint
// (txid -1 presumably means "no checkpoint yet" — confirm).
if (sig.mostRecentCheckpointTxId == dstImage.storage
.getMostRecentCheckpointTxId() &&
sig.mostRecentCheckpointTxId != -1) {
downloadImage = false;
LOG.info("Image has not changed. Will not download image.");
} else {
MD5Hash downloadedHash = TransferFsImage
.downloadImageToStorage(nnHostPort, sig.mostRecentCheckpointTxId,
dstImage, true);
dstImage.checkpointUploadDone(sig.mostRecentCheckpointTxId, downloadedHash);
dstImage.saveDigestAndRenameCheckpointImage(sig.mostRecentCheckpointTxId,
downloadedHash);
}
// get edits file
// Download finalized segments only; stop at the first in-progress one,
// since everything after it cannot be fetched as a finalized file.
for (RemoteEditLog log : manifest.getLogs()) {
if (log.inProgress())
break;
TransferFsImage.downloadEditsToStorage(nnHostPort, log,
dstImage.storage);
}
return downloadImage;
}