本文整理汇总了Java中org.apache.hadoop.hdfs.qjournal.MiniJournalCluster类的典型用法代码示例。如果您正苦于以下问题:Java MiniJournalCluster类的具体用法?Java MiniJournalCluster怎么用?Java MiniJournalCluster使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
MiniJournalCluster类属于org.apache.hadoop.hdfs.qjournal包,在下文中一共展示了MiniJournalCluster类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: writeSegmentUntilCrash
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Writes {@code numTxns} transactions into a fresh log segment, capturing
 * any failure into {@code thrown} instead of propagating it.
 *
 * @return the highest txid acknowledged before any failure occurred
 */
private long writeSegmentUntilCrash(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long txid, int numTxns, Holder<Throwable> thrown) {
  final long segmentStart = txid;
  long acked = txid - 1;
  try {
    EditLogOutputStream out = qjm.startLogSegment(txid,
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (int written = 0; written < numTxns; written++) {
      QJMTestUtil.writeTxns(out, txid, 1);
      txid++;
      acked++;
    }
    out.close();
    qjm.finalizeLogSegment(segmentStart, acked);
  } catch (Throwable t) {
    // Record the crash so the caller can inspect it; lastAcked still reflects
    // everything that made it through before the failure.
    thrown.held = t;
  }
  return acked;
}
示例2: doWorkload
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
* Run a simple workload of becoming the active writer and writing
* two log segments: 1-3 and 4-6.
*/
/**
 * Run a simple workload of becoming the active writer and writing
 * two log segments: 1-3 and 4-6.
 *
 * @return the last txid known to be acknowledged (0 if nothing succeeded)
 */
private static int doWorkload(MiniJournalCluster cluster,
    QuorumJournalManager qjm) throws IOException {
  int acked = 0;
  try {
    qjm.recoverUnfinalizedSegments();
    // First segment: txids 1..3.
    writeSegment(cluster, qjm, 1, 3, true);
    acked = 3;
    // Second segment: txids 4..6.
    writeSegment(cluster, qjm, 4, 3, true);
    acked = 6;
  } catch (QuorumException qe) {
    // A lost quorum is an expected outcome in these tests; report how far
    // the workload got rather than failing.
    LOG.info("Failed to write at txid " + acked,
        qe);
  }
  return acked;
}
示例3: setup
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Per-test setup: starts a mini journal cluster and initializes a spying
 * QuorumJournalManager over a freshly formatted journal, asserting the
 * manager ends up at epoch 1.
 */
@Before
public void setup() throws Exception {
conf = new Configuration();
// Don't retry connections - it just slows down the tests.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
cluster = new MiniJournalCluster.Builder(conf)
.build();
// createSpyingQJM wraps the loggers so individual tests can stub/verify them.
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
qjm.format(QJMTestUtil.FAKE_NSINFO);
qjm.recoverUnfinalizedSegments();
// A fresh recovery on a newly formatted journal should claim epoch 1.
assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
}
示例4: checkRecovery
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Verifies that a quorum of journal nodes finalized the segment that
 * starts at {@code segmentTxId} with the expected last txid.
 *
 * @throws IOException if reading a node's log directory fails
 */
private void checkRecovery(MiniJournalCluster cluster,
    long segmentTxId, long expectedEndTxId)
    throws IOException {
  int finalizedCount = 0;
  for (int node = 0; node < cluster.getNumNodes(); node++) {
    File dir = cluster.getCurrentDir(node, JID);
    EditLogFile logFile = FileJournalManager.getLogFile(dir, segmentTxId);
    // Skip nodes that never got the segment or haven't finalized it yet.
    if (logFile == null || logFile.isInProgress()) {
      continue;
    }
    finalizedCount++;
    if (logFile.getLastTxId() != expectedEndTxId) {
      fail("File " + logFile + " finalized to wrong txid, expected " +
          expectedEndTxId);
    }
  }
  if (finalizedCount < cluster.getQuorumSize()) {
    fail("Did not find a quorum of finalized logs starting at " +
        segmentTxId);
  }
}
示例5: setup
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Per-test setup: starts a mini journal cluster, waits for it to become
 * active, then initializes a spying QuorumJournalManager over a freshly
 * formatted journal, asserting the manager ends up at epoch 1.
 */
@Before
public void setup() throws Exception {
conf = new Configuration();
// Don't retry connections - it just slows down the tests.
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
cluster = new MiniJournalCluster.Builder(conf)
.build();
// Unlike the other setup variant, block until all journal nodes are serving.
cluster.waitActive();
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
qjm.format(QJMTestUtil.FAKE_NSINFO);
qjm.recoverUnfinalizedSegments();
// A fresh recovery on a newly formatted journal should claim epoch 1.
assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
}
示例6: simulateFailute
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Injection callback: throws an IOException when the fired event matches
 * the failure event configured for the journal node it fired on.
 * (Note: "Failute" is a pre-existing typo kept for caller compatibility.)
 *
 * @param event the injection event that fired
 * @param args  args[0] is the ServletContext of the journal node's HTTP server
 */
private void simulateFailute(InjectionEventI event, Object... args)
    throws IOException {
  // Recover the journal node from the servlet context the event fired in.
  ServletContext ctx = (ServletContext) args[0];
  JournalNode node = (JournalNode) ctx
      .getAttribute(JournalNodeHttpServer.JN_ATTRIBUTE_KEY);
  // The node's index within the mini cluster is stored in its configuration.
  Configuration nodeConf = node.getConf();
  int nodeId = nodeConf.getInt(MiniJournalCluster.DFS_JOURNALNODE_TEST_ID, 0);
  // Fail only if this node is scheduled to fail on exactly this event.
  if (failOn[nodeId] == event) {
    exceptionsThrown.incrementAndGet();
    throw new IOException("Testing failures");
  }
}
示例7: writeSegmentUntilCrash
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Writes {@code numTxns} transactions into a fresh log segment, capturing
 * any failure into {@code thrown} instead of propagating it.
 *
 * @return the highest txid acknowledged before any failure occurred
 */
private long writeSegmentUntilCrash(MiniJournalCluster cluster,
    QuorumJournalManager qjm, long txid, int numTxns, Holder<Throwable> thrown) {
  final long segmentStart = txid;
  long acked = txid - 1;
  try {
    EditLogOutputStream out = qjm.startLogSegment(txid);
    for (int written = 0; written < numTxns; written++) {
      QJMTestUtil.writeTxns(out, txid, 1);
      txid++;
      acked++;
    }
    out.close();
    qjm.finalizeLogSegment(segmentStart, acked);
  } catch (Throwable t) {
    // Record the crash so the caller can inspect it; lastAcked still reflects
    // everything that made it through before the failure.
    thrown.held = t;
  }
  return acked;
}
示例8: setup
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Per-test setup: starts a mini journal cluster with a short connect
 * timeout, then initializes a spying QuorumJournalManager over a freshly
 * formatted journal, asserting the manager ends up at epoch 1.
 */
@Before
public void setup() throws Exception {
conf = new Configuration();
// Don't retry connections - it just slows down the tests.
conf.setInt("ipc.client.connect.max.retries", 0);
// Fail fast on unreachable journal nodes (100 ms connect timeout).
conf.setLong(JournalConfigKeys.DFS_QJOURNAL_CONNECT_TIMEOUT_KEY, 100);
cluster = new MiniJournalCluster.Builder(conf)
.build();
qjm = createSpyingQJM();
spies = qjm.getLoggerSetForTests().getLoggersForTests();
// This codebase variant formats the journal via the transition API.
qjm.transitionJournal(QJMTestUtil.FAKE_NSINFO, Transition.FORMAT, null);
qjm.recoverUnfinalizedSegments();
// A fresh recovery on a newly formatted journal should claim epoch 1.
assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
}
示例9: checkRecovery
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Verifies that a quorum of journal nodes finalized the segment that
 * starts at {@code segmentTxId} with the expected last txid.
 *
 * @throws IOException if reading a node's journal directory fails
 */
private void checkRecovery(MiniJournalCluster cluster,
    long segmentTxId, long expectedEndTxId)
    throws IOException {
  int finalizedCount = 0;
  for (int node = 0; node < cluster.getNumNodes(); node++) {
    File dir = cluster.getJournalCurrentDir(node, JID);
    EditLogFile logFile = FileJournalManager.getLogFile(dir, segmentTxId);
    // Skip nodes that never got the segment or haven't finalized it yet.
    if (logFile == null || logFile.isInProgress()) {
      continue;
    }
    finalizedCount++;
    if (logFile.getLastTxId() != expectedEndTxId) {
      fail("File " + logFile + " finalized to wrong txid, expected " +
          expectedEndTxId);
    }
  }
  if (finalizedCount < cluster.getQuorumSize()) {
    fail("Did not find a quorum of finalized logs starting at " +
        segmentTxId);
  }
}
示例10: setup
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Per-test setup: starts a mini journal cluster with a short connect
 * timeout, then builds a spying QuorumJournalManager via the shared
 * TestQuorumJournalManager helper and formats the journal.
 */
@Before
public void setup() throws Exception {
conf = new Configuration();
// Don't retry connections - it just slows down the tests.
conf.setInt("ipc.client.connect.max.retries", 0);
// Fail fast on unreachable journal nodes (100 ms connect timeout).
conf.setLong(JournalConfigKeys.DFS_QJOURNAL_CONNECT_TIMEOUT_KEY, 100);
cluster = new MiniJournalCluster.Builder(conf)
.build();
// Reuse the spying-QJM factory from TestQuorumJournalManager.
qjm = TestQuorumJournalManager.createSpyingQJM(conf, cluster, JID, FAKE_NSINFO);
qjm.transitionJournal(QJMTestUtil.FAKE_NSINFO, Transition.FORMAT, null);
qjm.recoverUnfinalizedSegments();
// A fresh recovery on a newly formatted journal should claim epoch 1.
assertEquals(1, qjm.getLoggerSetForTests().getEpoch());
}
示例11: setup
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Per-test setup: starts a single standalone JournalNode on ephemeral
 * RPC/HTTP ports, backed by a clean edits directory, and formats both the
 * journal and image storage for a unique test journal id.
 */
@Before
public void setup() throws Exception {
// Start from an empty edits directory so prior runs can't interfere.
File editsDir = new File(MiniDFSCluster.getBaseDirectory(null)
+ File.separator + "TestJournalNode");
FileUtil.fullyDelete(editsDir);
conf.set(JournalConfigKeys.DFS_JOURNALNODE_DIR_KEY,
editsDir.getAbsolutePath());
// Port 0 => let the OS pick a free RPC port.
conf.set(JournalConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY, "0.0.0.0:0");
// Pick a free HTTP port, record it in conf, and remember the URL for tests.
int port = MiniJournalCluster.getFreeHttpPortAndUpdateConf(conf, true);
httpAddress = "http://localhost:" + port;
jn = new JournalNode();
jn.setConf(conf);
jn.start();
// Unique journal id per run to avoid clashing with leftover state.
journalId = "test-journalid-" + QJMTestUtil.uniqueSequenceId();
journal = jn.getOrCreateJournal(QuorumJournalManager
.journalIdStringToBytes(journalId));
journal.transitionJournal(FAKE_NSINFO, Transition.FORMAT, null);
journal.transitionImage(FAKE_NSINFO, Transition.FORMAT, null);
}
示例12: setup
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Per-test setup: starts a single standalone JournalNode on ephemeral
 * ports with a clean edits directory, formats the journal for a unique
 * test journal id, and opens an IPC logger channel to the node.
 */
@Before
public void setup() throws Exception {
// Start from an empty edits directory so prior runs can't interfere.
File editsDir = new File(MiniDFSCluster.getBaseDirectory(null) +
File.separator + "TestJournalNode");
FileUtil.fullyDelete(editsDir);
conf.set(JournalConfigKeys.DFS_JOURNALNODE_DIR_KEY,
editsDir.getAbsolutePath());
// Port 0 => let the OS pick a free RPC port.
conf.set(JournalConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
"0.0.0.0:0");
// Pick a free HTTP port and record it in conf (return value unused here).
MiniJournalCluster.getFreeHttpPortAndUpdateConf(conf, true);
jn = new JournalNode();
jn.setConf(conf);
jn.start();
// Unique journal id per run to avoid clashing with leftover state.
journalId = "test-journalid-" + QJMTestUtil.uniqueSequenceId();
journal = jn.getOrCreateJournal(QuorumJournalManager
.journalIdStringToBytes(journalId));
journal.transitionJournal(FAKE_NSINFO, Transition.FORMAT, null);
// Channel used by tests to talk to the node over its bound IPC address.
ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
}
示例13: testFailToStartWithBadConfig
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Verifies a JournalNode refuses to start for each class of bad journal
 * directory configuration: a relative path, an existing non-directory
 * file, and an uncreatable directory.
 */
@Test
public void testFailToStartWithBadConfig() throws Exception {
  Configuration conf = new Configuration();
  // Relative paths are rejected outright.
  conf.set(JournalConfigKeys.DFS_JOURNALNODE_DIR_KEY, "non-absolute-path");
  MiniJournalCluster.getFreeHttpPortAndUpdateConf(conf, true);
  assertJNFailsToStart(conf, "should be an absolute path");
  // Existing file which is not a directory
  conf.set(JournalConfigKeys.DFS_JOURNALNODE_DIR_KEY, "/dev/null");
  assertJNFailsToStart(conf, "is not a directory");
  // Directory which cannot be created
  // (use the imported JournalConfigKeys like the two cases above; the
  // fully-qualified name here was inconsistent with the rest of the method)
  conf.set(JournalConfigKeys.DFS_JOURNALNODE_DIR_KEY, "/proc/does-not-exist");
  assertJNFailsToStart(conf, "Could not create");
}
示例14: setUp
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
 * Shared setup for QJM failure tests: installs the failure-injection
 * handler and boots a 1-datanode MiniAvatarCluster with QJM enabled,
 * either creating a new journal cluster or reusing the supplied one.
 *
 * @param confg    configuration to run the cluster with
 * @param jCluster existing journal cluster to attach to, or null to create one
 * @param name     test name, logged for traceability
 */
public void setUp(Configuration confg, MiniJournalCluster jCluster,
String name)
throws Exception {
LOG.info("START TEST : " + name);
// Install the injection handler so tests can simulate QJM failures.
handler = new TestAvatarQJMFailuresHandler();
InjectionHandler.set(handler);
FSEditLog.setRuntimeForTesting(Runtime.getRuntime());
conf = confg;
if (jCluster == null) {
cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
.enableQJM(true).build();
} else {
// Reuse the caller-provided journal cluster instead of creating one.
cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
.enableQJM(true).setJournalCluster(jCluster).build();
}
fs = cluster.getFileSystem();
journalCluster = cluster.getJournalCluster();
}
示例15: testUpgradeFailureAfterSaveImage
import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; //导入依赖的package包/类
/**
* This test simulates the scenario where the upgrade fails after saving image
* and ensures that the recovery on the journal nodes work correctly.
*/
/**
 * This test simulates the scenario where the upgrade fails after saving image
 * and ensures that the recovery on the journal nodes work correctly.
 */
@Test
public void testUpgradeFailureAfterSaveImage() throws Exception {
// Arm the injection handler to abort the upgrade right after saveImage.
h.failAfterSaveImage = true;
// Snapshot checksums before the upgrade so recovery can be verified against them.
long[] checksums = getChecksums();
// Upgrade the cluster.
MiniJournalCluster journalCluster = cluster.getJournalCluster();
// This upgrade will fail after saving the image.
try {
cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1)
.format(false).startOpt(StartupOption.UPGRADE)
.setJournalCluster(journalCluster).instantionRetries(1).build();
fail("Upgrade did not throw exception");
} catch (IOException ie) {
// ignore - the injected failure is the expected outcome here.
}
// This will correctly recover the upgrade directories.
cluster = new MiniAvatarCluster.Builder(conf).numDataNodes(1).format(false)
.setJournalCluster(cluster.getJournalCluster()).build();
// Data must match the pre-upgrade snapshot after recovery.
verifyUpgrade(checksums, true);
}