This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.MiniDFSCluster. If you are wondering what MiniDFSCluster is for or how to use it, the curated examples below may help.
The MiniDFSCluster class belongs to the org.apache.hadoop.hdfs package. Fifteen code examples are shown below, ordered by popularity by default.
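Before the individual examples, here is a minimal, self-contained sketch of the lifecycle most of them follow: build the cluster from a Configuration, wait until it is active, use the FileSystem it exposes, and shut it down in a finally block. The class name and the /sketch path are illustrative assumptions, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // Build an in-process HDFS cluster with a single DataNode.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive(); // block until NameNode and DataNodes are up
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/sketch")); // exercise the running cluster
    } finally {
      if (cluster != null) {
        cluster.shutdown(); // release ports and storage directories
      }
    }
  }
}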
Example 1: assertClusterStartFailsWhenDirLocked
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Assert that, if sdToLock is locked, the cluster is not allowed to start up.
 * @param conf cluster conf to use
 * @param sdToLock the storage directory to lock
 */
private static void assertClusterStartFailsWhenDirLocked(
    Configuration conf, StorageDirectory sdToLock) throws IOException {
  // Lock the edits dir, then start the NN, and make sure it fails to start
  sdToLock.lock();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).format(false)
        .manageNameDfsDirs(false).numDataNodes(0).build();
    assertFalse("cluster should fail to start after locking " +
        sdToLock, sdToLock.isLockSupported());
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("already locked", ioe);
  } finally {
    cleanup(cluster);
    cluster = null;
    sdToLock.unlock();
  }
}
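The example above calls a cleanup(cluster) helper that is not shown. A minimal sketch of what such a helper typically looks like; the exact error handling is an assumption, not the original implementation:

private static void cleanup(MiniDFSCluster cluster) {
  // Hypothetical helper: shut the cluster down, swallowing shutdown
  // errors so they do not mask the assertion failure under test.
  if (cluster != null) {
    try {
      cluster.shutdown();
    } catch (Exception e) {
      // log and ignore; the test outcome was decided before cleanup
    }
  }
}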
Example 2: createCluster
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
private MiniDFSCluster createCluster() throws HDFSQuasiServiceException {
  MiniDFSCluster hdfsCluster = null;
  File baseDir = new File(getWorkingDir()).getAbsoluteFile();
  FileUtil.fullyDelete(baseDir);
  this.conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
  LOG.info("Using base dir " + baseDir.getAbsolutePath());
  MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(this.conf);
  builder.numDataNodes(getNumberOfDataNodes());
  try {
    hdfsCluster = builder.build();
  } catch (IOException e) {
    LOG.error("Error in creating mini DFS cluster ", e);
    throw new HDFSQuasiServiceException("Error in creating mini DFS cluster ", e);
  }
  ListIterator<DataNode> itr = hdfsCluster.getDataNodes().listIterator();
  LOG.info("NameNode: " + hdfsCluster.getNameNode().getNameNodeAddressHostPortString());
  while (itr.hasNext()) {
    DataNode dn = itr.next();
    LOG.info("DataNode: " + dn.getDisplayName());
  }
  return hdfsCluster;
}
Example 3: testDTInInsecureCluster
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test
public void testDTInInsecureCluster() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    webHdfs.getDelegationToken(null);
    fail("No exception is thrown.");
  } catch (AccessControlException ace) {
    Assert.assertTrue(ace.getMessage().startsWith(
        WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 4: testWriteToTemporary
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test
public void testWriteToTemporary() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, dataSet);
    // test writeToTemporary
    testWriteToTemporary(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
Example 5: testMerge
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
public void testMerge() throws Exception {
  MiniDFSCluster dfsCluster = null;
  MiniMRClientCluster mrCluster = null;
  FileSystem fileSystem = null;
  try {
    Configuration conf = new Configuration();
    // Start the mini-MR and mini-DFS clusters
    dfsCluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_HADOOP_DATA_NODES).build();
    fileSystem = dfsCluster.getFileSystem();
    mrCluster = MiniMRClientClusterFactory.create(this.getClass(),
        NUM_HADOOP_DATA_NODES, conf);
    // Generate input.
    createInput(fileSystem);
    // Run the test.
    runMergeTest(new JobConf(mrCluster.getConfig()), fileSystem);
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
    if (mrCluster != null) {
      mrCluster.stop();
    }
  }
}
Example 6: setup
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Before
public void setup() throws Exception {
  File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
      File.separator + "TestJournalNode");
  FileUtil.fullyDelete(editsDir);
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      editsDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
      "0.0.0.0:0");
  jn = new JournalNode();
  jn.setConf(conf);
  jn.start();
  journalId = "test-journalid-" + GenericTestUtils.uniqueSequenceId();
  journal = jn.getOrCreateJournal(journalId);
  journal.format(FAKE_NSINFO);
  ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
}
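This setup method relies on test fixtures (conf, jn, journalId, journal, ch, FAKE_NSINFO) declared elsewhere in the test class. A plausible sketch of those declarations; the NamespaceInfo argument values are illustrative assumptions:

// Hypothetical fixture declarations assumed by the setup above;
// the NamespaceInfo arguments are made-up sample values.
private Configuration conf = new Configuration();
private JournalNode jn;
private String journalId;
private Journal journal;
private IPCLoggerChannel ch;
private static final NamespaceInfo FAKE_NSINFO =
    new NamespaceInfo(12345, "mycluster", "my-bp", 0L);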
Example 7: setup
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
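As in the previous example, several fields (conf, cluster, namenodeId, filesystem, namesystem, metrics) belong to the enclosing test class. A sketch of what those declarations might look like; the namenodeId value of 0 is an assumption (it selects the first NameNode in the HA topology):

// Hypothetical field declarations assumed by the HA setup above.
private Configuration conf;
private MiniDFSCluster cluster;
private static final int namenodeId = 0; // index of the NN to make active
private DistributedFileSystem filesystem;
private FSNamesystem namesystem;
private RetryCacheMetrics metrics;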
Example 8: testWithDFS
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
public void testWithDFS() throws IOException {
  MiniDFSCluster dfs = null;
  MiniMRCluster mr = null;
  FileSystem fileSys = null;
  try {
    final int taskTrackers = 4;
    JobConf conf = new JobConf();
    conf.set(JTConfig.JT_SYSTEM_DIR, "/tmp/custom/mapred/system");
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fileSys = dfs.getFileSystem();
    mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, null, null, conf);
    runWordCount(mr, mr.createJobConf(), conf.get("mapred.system.dir"));
  } finally {
    if (dfs != null) { dfs.shutdown(); }
    if (mr != null) { mr.shutdown(); }
  }
}
Example 9: setUp
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL,
      "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();
  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
Example 10: setUp
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  hdfs = cluster.getFileSystem();
  DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file5, 1024, REPLICATION, seed);
  hdfs.mkdirs(sub2);
}
Example 11: suite
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
public static Test suite() {
  TestSetup setup = new TestSetup(new TestSuite(TestJoinProperties.class)) {
    protected void setUp() throws Exception {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
      base = cluster.getFileSystem().makeQualified(new Path("/nested"));
      src = generateSources(conf);
    }
    protected void tearDown() throws Exception {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  };
  return setup;
}
Example 12: testFsckNonExistent
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck")
      .setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short) 3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) {} }
    if (cluster != null) { cluster.shutdown(); }
  }
}
Example 13: testStorageBlockContentsStaleAfterNNRestart
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Verify the following scenario.
 * 1. NN restarts.
 * 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
 * 3. After reregistration completes, DN will send Heartbeat, followed by
 *    Blockreport.
 * 4. NN will mark DatanodeStorageInfo#blockContentsStale to false.
 * @throws Exception
 */
@Test(timeout = 60000)
public void testStorageBlockContentsStaleAfterNNRestart() throws Exception {
  MiniDFSCluster dfsCluster = null;
  try {
    Configuration config = new Configuration();
    dfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    dfsCluster.waitActive();
    dfsCluster.restartNameNode(true);
    BlockManagerTestUtil.checkHeartbeat(
        dfsCluster.getNamesystem().getBlockManager());
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanNameFsns = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    Integer numStaleStorages = (Integer) (mbs.getAttribute(
        mxbeanNameFsns, "NumStaleStorages"));
    assertEquals(0, numStaleStorages.intValue());
  } finally {
    if (dfsCluster != null) {
      dfsCluster.shutdown();
    }
  }
}
Example 14: testMoverCliWithFederationHA
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test
public void testMoverCliWithFederationHA() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(3))
      .numDataNodes(0).build();
  final Configuration conf = new HdfsConfiguration();
  DFSTestUtil.setFederatedHAConfiguration(cluster, conf);
  try {
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(3, namenodes.size());
    Iterator<URI> iter = namenodes.iterator();
    URI nn1 = iter.next();
    URI nn2 = iter.next();
    URI nn3 = iter.next();
    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf,
        "-p", nn1 + "/foo", nn1 + "/bar", nn2 + "/foo/bar", nn3 + "/foobar");
    Assert.assertEquals(3, movePaths.size());
    checkMovePaths(movePaths.get(nn1), new Path("/foo"), new Path("/bar"));
    checkMovePaths(movePaths.get(nn2), new Path("/foo/bar"));
    checkMovePaths(movePaths.get(nn3), new Path("/foobar"));
  } finally {
    cluster.shutdown();
  }
}
Example 15: testBadConstructor
import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Test that an implementation of JournalManager without a
 * (Configuration, URI) constructor throws an exception.
 */
@Test
public void testBadConstructor() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
      BadConstructorJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "dummy://test");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    fail("Should have failed before this point");
  } catch (IllegalArgumentException iae) {
    if (!iae.getMessage().contains("Unable to construct journal")) {
      fail("Should have failed with unable to construct exception");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
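The BadConstructorJournalManager referenced above is a test double: a JournalManager implementation that deliberately lacks the (Configuration, URI) constructor the NameNode looks up by reflection. A minimal sketch of such a class, assuming a no-op DummyJournalManager base class with a three-argument constructor as in the corresponding Hadoop test:

// Hedged sketch: a JournalManager subclass exposing only a no-arg
// constructor, so the NameNode's reflective lookup of a
// (Configuration, URI) constructor fails with the error asserted above.
// DummyJournalManager is assumed to be a no-op JournalManager test class.
public static class BadConstructorJournalManager extends DummyJournalManager {
  public BadConstructorJournalManager() {
    super(null, null, null);
  }
}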