This page collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.FSEditLog. If you are wondering what the FSEditLog class does or how to use it, the curated examples below may help.
The FSEditLog class belongs to the org.apache.hadoop.hdfs.server.namenode package. Fifteen code examples of the class are shown below, sorted by popularity by default.
Example 1: setUp
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
public void setUp(Configuration confg, MiniJournalCluster jCluster,
    String name) throws Exception {
  LOG.info("START TEST : " + name);
  handler = new TestAvatarQJMFailuresHandler();
  InjectionHandler.set(handler);
  FSEditLog.setRuntimeForTesting(Runtime.getRuntime());
  conf = confg;
  if (jCluster == null) {
    cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
        .enableQJM(true).build();
  } else {
    cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
        .enableQJM(true).setJournalCluster(jCluster).build();
  }
  fs = cluster.getFileSystem();
  journalCluster = cluster.getJournalCluster();
}
Example 2: tearDown
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
@After
public void tearDown() throws Exception {
  try {
    super.tearDown();
    if (tempEditsFile != null) {
      if (!tempEditsFile.delete()) {
        LOG.warn("Unable to delete temporary edits file: " +
            tempEditsFile.getAbsolutePath());
      }
    }
  } finally {
    // Reset sizeFlushBuffer between each unit test (in case it has been
    // altered by a unit test to trigger a boundary condition)
    int lastSizeFlushBuffer = FSEditLog.sizeFlushBuffer;
    FSEditLog.sizeFlushBuffer = origSizeFlushBuffer;
    if (lastSizeFlushBuffer != origSizeFlushBuffer) {
      LOG.info("Setting FSEditLog.sizeFlushBuffer back to " +
          origSizeFlushBuffer + "; was " + lastSizeFlushBuffer +
          " after last test.");
    }
  }
}
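The save/restore idiom above pairs with the setUp in Example 10 below: snapshot mutable static state before each test and restore it afterwards, so a test that tweaks the value cannot leak into the next one. Here is a minimal, self-contained sketch of the same pattern; the Config class and bufferSize field are hypothetical stand-ins for FSEditLog.sizeFlushBuffer.
import org.junit.After;
import org.junit.Before;

public class StaticStateGuardSketch {
  // Hypothetical stand-in for the mutable static FSEditLog.sizeFlushBuffer.
  static class Config { static int bufferSize = 512 * 1024; }

  private int origBufferSize;

  @Before
  public void saveState() {
    origBufferSize = Config.bufferSize; // snapshot before the test runs
  }

  @After
  public void restoreState() {
    Config.bufferSize = origBufferSize; // undo any change the test made
  }
}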
Example 3: purgeLogsOlderThan
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
/**
 * @see JournalManager#purgeLogsOlderThan(long)
 */
public synchronized void purgeLogsOlderThan(RequestInfo reqInfo,
    long minTxIdToKeep) throws IOException {
  checkJournalStorageFormatted();
  checkRequest(reqInfo);
  journalStorage.purgeDataOlderThan(minTxIdToKeep);
  if (minTxIdToKeep == FSEditLog.PURGE_ALL_TXID) {
    // When trying to remove all the segments, reset
    // the committed transaction ID too.
    committedTxnId.set(0, true);
    minTxid = 0;
  } else {
    minTxid = minTxIdToKeep;
  }
  if (imageStorage.isFormatted()) {
    imageStorage.purgeDataOlderThan(minTxIdToKeep == 0 ? -1 : minTxIdToKeep);
  }
}
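The FSEditLog.PURGE_ALL_TXID branch above is a sentinel-value pattern: one reserved transaction ID means "drop everything and reset bookkeeping" rather than an ordinary threshold purge. A minimal, self-contained sketch of the idea follows; all names are hypothetical and the sentinel value is assumed, not taken from Hadoop.
public class PurgeSentinelSketch {
  static final long PURGE_ALL_TXID = Long.MAX_VALUE; // assumed sentinel value

  long minTxid;
  long committedTxnId;

  synchronized void purgeLogsOlderThan(long minTxIdToKeep) {
    // ... delete on-disk segments older than minTxIdToKeep ...
    if (minTxIdToKeep == PURGE_ALL_TXID) {
      committedTxnId = 0; // removing all segments invalidates the committed id
      minTxid = 0;
    } else {
      minTxid = minTxIdToKeep; // ordinary purge: just advance the floor
    }
  }
}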
Example 4: causeFailureOnEditLogRead
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  FSEditLog spyEditLog = NameNodeAdapter.spyOnEditLog(nn1);
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  return answer;
}
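LimitedEditLogAnswer itself is not shown in this listing. As a hedged illustration of the doAnswer pattern it relies on, here is a self-contained Mockito Answer that behaves normally until the test arms it to inject read failures; the class name and the fail field are hypothetical.
import java.io.IOException;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

// Installed on a spy, this Answer delegates to the real method until the
// test flips `fail`, at which point stubbed calls throw instead.
class InjectedFailureAnswer implements Answer<Object> {
  volatile boolean fail = false;

  @Override
  public Object answer(InvocationOnMock invocation) throws Throwable {
    if (fail) {
      throw new IOException("injected edit log read failure");
    }
    return invocation.callRealMethod(); // spy: fall through to real behavior
  }
}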
Example 5: processNamenodesForShutdown
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
private void processNamenodesForShutdown(Collection<Thread> threads) {
  Runtime runtime = Runtime.getRuntime();
  runtime = spy(runtime);
  doNothing().when(runtime).exit(anyInt());
  FSEditLog.setRuntimeForTesting(runtime);
  for (NameNodeInfo nnInfo : nameNodes) {
    Thread st = new Thread(new ShutDownUtil(nnInfo));
    st.start();
    threads.add(st);
  }
}
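The spy-the-Runtime idiom above (also used in Examples 7 and 9) keeps a test alive when production code calls Runtime.exit(). A minimal sketch of the idiom on its own, runnable with only Mockito on the classpath:
import static org.mockito.Mockito.*;

// Stub out Runtime.exit() so code under test can "exit" without killing the
// JVM, while the interaction stays verifiable.
public class RuntimeSpySketch {
  public static void main(String[] args) {
    Runtime runtime = spy(Runtime.getRuntime());
    doNothing().when(runtime).exit(anyInt());

    runtime.exit(1); // swallowed by the stub; the JVM keeps running
    verify(runtime, times(1)).exit(anyInt()); // ...but the call was recorded
  }
}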
Example 6: tearDown
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
private void tearDown(FSEditLog localEditLog,
    boolean writeEndTxn, boolean updateLastCorrectTxn) throws IOException {
  localEditLog.endCurrentLogSegment(writeEndTxn);
  endTxId = localEditLog.getLastWrittenTxId();
  running = false;
  lastScan = true;
  if (updateLastCorrectTxn) {
    standby.setLastCorrectTxId(endTxId);
  }
  standby.clearIngestState(endTxId + 1);
}
Example 7: testJournalClusterFailureWhenRequired
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
/**
 * Tests that if the journal cluster is marked as required, we cannot
 * tolerate its failure.
 */
@Test
public void testJournalClusterFailureWhenRequired() throws Exception {
  Configuration conf = new Configuration();
  journalCluster = new MiniJournalCluster.Builder(conf).numJournalNodes(3)
      .build();
  String journalURI = journalCluster.getQuorumJournalURI(
      MiniAvatarCluster.JID).toString();
  conf.set("dfs.name.edits.dir.required", journalURI + "/zero," + journalURI
      + "/one");
  setUp(conf, journalCluster, "testJournalClusterFailureWhenRequired");
  // spy the runtime
  Runtime runtime = Runtime.getRuntime();
  runtime = spy(runtime);
  doNothing().when(runtime).exit(anyInt());
  FSEditLog.setRuntimeForTesting(runtime);
  // Kill the standby to ensure only one runtime.exit() call.
  cluster.killStandby();
  journalCluster.shutdown();
  fs.create(new Path("/test1"));
  // verify failure.
  verify(runtime, times(1)).exit(anyInt());
}
Example 8: setUp
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
public void setUp(String name) throws Exception {
  LOG.info("START TEST : " + name);
  handler = new TestAvatarQJMFailuresHandler();
  InjectionHandler.set(handler);
  FSEditLog.setRuntimeForTesting(Runtime.getRuntime());
  conf = new Configuration();
  cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
      .enableQJM(true).build();
  fs = cluster.getFileSystem();
  journalCluster = cluster.getJournalCluster();
}
Example 9: setUp
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
private void setUp(String name) throws Exception {
  LOG.info("------------------- test: " + name + " START ----------------");
  conf = new Configuration();
  conf.setLong("fs.checkpoint.period", 3600);
  cluster = new MiniAvatarCluster(conf, 2, true, null, null);
  fs = cluster.getFileSystem();
  // spy the runtime
  runtime = Runtime.getRuntime();
  runtime = spy(runtime);
  doNothing().when(runtime).exit(anyInt());
  FSEditLog.setRuntimeForTesting(runtime);
}
Example 10: setUp
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
@Before
public void setUp() throws Exception {
  super.setUp();
  // Remember the size of the flush buffer so that we can reset it
  // in tearDown() in case any of the tests alter it
  origSizeFlushBuffer = FSEditLog.sizeFlushBuffer;
  // Create a temp file to use for comparison of the BookKeeper-based
  // input stream and the file-based input stream
  tempEditsFile = FSEditLogTestUtil.createTempEditsFile(
      "testBookKeeperEditLogInputStream");
}
Example 11: startLogSegment
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
@Override
public EditLogOutputStream startLogSegment(long txId) throws IOException {
  Preconditions.checkState(isActiveWriter,
      "must recover segments before starting a new one");
  QuorumCall<AsyncLogger, Void> q = loggers.startLogSegment(txId);
  loggers.waitForWriteQuorum(q, startSegmentTimeoutMs,
      "startLogSegment(" + txId + ")");
  return new QuorumOutputStream(loggers, txId, FSEditLog.sizeFlushBuffer,
      writeTxnsTimeoutMs, metrics, journalId);
}
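waitForWriteQuorum blocks until a majority of the journal loggers acknowledge the call or a timeout fires. The following is a hedged, self-contained sketch of that idea using CompletableFuture; it illustrates the pattern only and is not the QJM implementation.
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Count successful logger responses and return once a majority has arrived;
// anything less within the timeout is a quorum failure.
public class QuorumWaitSketch {
  static void waitForWriteQuorum(List<CompletableFuture<Void>> calls,
      long timeoutMs) throws IOException, InterruptedException {
    int majority = calls.size() / 2 + 1;
    CountDownLatch successes = new CountDownLatch(majority);
    for (CompletableFuture<Void> call : calls) {
      call.thenRun(successes::countDown); // counts only non-exceptional completions
    }
    if (!successes.await(timeoutMs, TimeUnit.MILLISECONDS)) {
      throw new IOException("timed out waiting for a write quorum");
    }
  }
}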
Example 12: causeFailureOnEditLogRead
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
private LimitedEditLogAnswer causeFailureOnEditLogRead() throws IOException {
  FSEditLog spyEditLog = spy(nn1.getNamesystem().getEditLogTailer()
      .getEditLog());
  LimitedEditLogAnswer answer = new LimitedEditLogAnswer();
  doAnswer(answer).when(spyEditLog).selectInputStreams(
      anyLong(), anyLong(), (MetaRecoveryContext) anyObject(), anyBoolean());
  nn1.getNamesystem().getEditLogTailer().setEditLog(spyEditLog);
  return answer;
}
Example 13: getEditLog
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
@VisibleForTesting
FSEditLog getEditLog() {
  return editLog;
}
Example 14: setEditLog
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
@VisibleForTesting
public void setEditLog(FSEditLog editLog) {
  this.editLog = editLog;
}
Example 15: setEditLogForTesting
import org.apache.hadoop.hdfs.server.namenode.FSEditLog; // import the required package/class
public static void setEditLogForTesting(FSNamesystem fsn, FSEditLog newLog) {
  Whitebox.setInternalState(fsn.getFSImage(), "editLog", newLog);
  Whitebox.setInternalState(fsn.getFSDirectory(), "editLog", newLog);
}
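A hedged usage sketch for the helper above: wrap the live edit log in a Mockito spy, swap it into the namesystem, and verify interactions. It assumes a running FSNamesystem obtained from a mini-cluster, and logSync() is just one plausible call to check; the class and method names here are hypothetical.
import static org.mockito.Mockito.*;
import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

// Install a spy so edit-log traffic triggered by the test becomes observable
// without changing NameNode behavior.
public class EditLogSpySketch {
  static void observeEditLog(FSNamesystem fsn) {
    FSEditLog spyLog = spy(fsn.getEditLog());
    NameNodeAdapter.setEditLogForTesting(fsn, spyLog);
    // ... perform namespace operations against the NameNode ...
    verify(spyLog, atLeastOnce()).logSync(); // assumed interaction to verify
  }
}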