This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.startMiniDFSCluster. If you are wondering what HBaseTestingUtility.startMiniDFSCluster does, how to use it, or where to find examples of it, the curated method samples below may help. You can also explore the enclosing class, org.apache.hadoop.hbase.HBaseTestingUtility, for further usage examples.
The following presents 10 code examples of HBaseTestingUtility.startMiniDFSCluster, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
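Before the collected examples, a minimal sketch of the typical lifecycle may help: create an HBaseTestingUtility, start a single-DataNode mini DFS cluster, use the returned MiniDFSCluster, and shut it down in a finally block. This sketch is not taken from any of the projects below; the class name and path in it are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class StartMiniDFSClusterSketch { // hypothetical class, for illustration only
  public void runSketch() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    // Tune the shared Configuration before starting the cluster.
    Configuration conf = htu.getConfiguration();
    conf.setInt("dfs.replication", 1);
    MiniDFSCluster cluster = htu.startMiniDFSCluster(1); // one DataNode
    try {
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/sketch")); // hypothetical test path
    } finally {
      cluster.shutdown(); // always release the mini cluster
    }
  }
}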
Example 1: verifyFileInDirWithStoragePolicy
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
private void verifyFileInDirWithStoragePolicy(final String policy) throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  conf.set(HConstants.WAL_STORAGE_POLICY, policy);
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  try {
    assertTrue(FSUtils.isHDFS(conf));
    FileSystem fs = FileSystem.get(conf);
    Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");
    fs.mkdirs(testDir);
    FSUtils.setStoragePolicy(fs, conf, testDir, HConstants.WAL_STORAGE_POLICY,
        HConstants.DEFAULT_WAL_STORAGE_POLICY);
    String file = UUID.randomUUID().toString();
    Path p = new Path(testDir, file);
    WriteDataToHDFS(fs, p, 4096);
    // will assert existence before deleting.
    cleanupFile(fs, testDir);
  } finally {
    cluster.shutdown();
  }
}
Example 2: testHDFSLinkReadDuringRename
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());
  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
Example 3: setUpBeforeClass
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  TEST_UTIL.startMiniDFSCluster(3);
  cluster = TEST_UTIL.getDFSCluster();
  fs = cluster.getFileSystem();
}
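A class-level setup like this is normally paired with an @AfterClass teardown that releases the mini cluster; the snippet above does not show one, so the following is a hedged sketch of the usual counterpart, assuming the same static TEST_UTIL field:

@AfterClass
public static void tearDownAfterClass() throws Exception {
  // Assumption: TEST_UTIL is the static field initialized in setUpBeforeClass above.
  TEST_UTIL.shutdownMiniDFSCluster(); // stops the mini DFS cluster started above
}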
Example 4: setUp
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
@Before
public void setUp() throws Exception {
  htu = new HBaseTestingUtility();
  htu.getConfiguration().setInt("dfs.blocksize", 1024); // for the test with multiple blocks
  htu.getConfiguration().setBoolean("dfs.support.append", true);
  htu.getConfiguration().setInt("dfs.replication", 3);
  htu.startMiniDFSCluster(3,
      new String[]{"/r1", "/r2", "/r3"}, new String[]{host1, host2, host3});
  conf = htu.getConfiguration();
  cluster = htu.getDFSCluster();
  dfs = (DistributedFileSystem) FileSystem.get(conf);
}
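Note that this example uses the rack-aware overload of startMiniDFSCluster: besides the DataNode count, it takes parallel arrays of rack names and hostnames, so each of the three DataNodes is pinned to its own rack and host. That topology is what lets tests of this kind exercise replication and block-placement behavior across racks.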
Example 5: testIsHDFS
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
@Test public void testIsHDFS() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  htu.getConfiguration().setBoolean("dfs.support.append", false);
  assertFalse(FSUtils.isHDFS(htu.getConfiguration()));
  htu.getConfiguration().setBoolean("dfs.support.append", true);
  MiniDFSCluster cluster = null;
  try {
    cluster = htu.startMiniDFSCluster(1);
    assertTrue(FSUtils.isHDFS(htu.getConfiguration()));
    assertTrue(FSUtils.isAppendSupported(htu.getConfiguration()));
  } finally {
    if (cluster != null) cluster.shutdown();
  }
}
Example 6: testRenameAndSetModifyTime
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
@Test
public void testRenameAndSetModifyTime() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  Configuration conf = htu.getConfiguration();
  MiniDFSCluster cluster = htu.startMiniDFSCluster(1);
  assertTrue(FSUtils.isHDFS(conf));

  FileSystem fs = FileSystem.get(conf);
  Path testDir = htu.getDataTestDirOnTestFS("testArchiveFile");

  String file = UUID.randomUUID().toString();
  Path p = new Path(testDir, file);

  FSDataOutputStream out = fs.create(p);
  out.close();
  assertTrue("The created file should be present", FSUtils.isExists(fs, p));

  long expect = System.currentTimeMillis() + 1000;
  assertNotEquals(expect, fs.getFileStatus(p).getModificationTime());

  ManualEnvironmentEdge mockEnv = new ManualEnvironmentEdge();
  mockEnv.setValue(expect);
  EnvironmentEdgeManager.injectEdge(mockEnv);
  try {
    String dstFile = UUID.randomUUID().toString();
    Path dst = new Path(testDir, dstFile);

    assertTrue(FSUtils.renameAndSetModifyTime(fs, p, dst));
    assertFalse("The moved file should not be present", FSUtils.isExists(fs, p));
    assertTrue("The dst file should be present", FSUtils.isExists(fs, dst));
    assertEquals(expect, fs.getFileStatus(dst).getModificationTime());
  } finally {
    EnvironmentEdgeManager.reset();
    cluster.shutdown(); // shut the cluster down even if an assertion above fails
  }
}
Example 7: setUp
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
@Before
public void setUp() throws Exception {
  testUtil = new HBaseTestingUtility();
  testUtil.startMiniDFSCluster(1);
  testUtil.startMiniZKCluster(1);
  testUtil.createRootDir();
  cluster = new LocalHBaseCluster(testUtil.getConfiguration(), 0, 0);
}
Example 8: testMasterShutdownBeforeStartingAnyRegionServer
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
@Test(timeout = 60000)
public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 0;

  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.ipc.client.failed.servers.expiry", 200);
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);

  // Start the cluster
  final HBaseTestingUtility util = new HBaseTestingUtility(conf);
  util.startMiniDFSCluster(3);
  util.startMiniZKCluster();
  util.createRootDir();
  final LocalHBaseCluster cluster =
      new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class,
          MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
  final int MASTER_INDEX = 0;
  final MasterThread master = cluster.getMasters().get(MASTER_INDEX);
  master.start();
  LOG.info("Called master start on " + master.getName());
  Thread shutdownThread = new Thread() {
    @Override
    public void run() {
      LOG.info("Before call to shutdown master");
      try {
        try (Connection connection =
            ConnectionFactory.createConnection(util.getConfiguration())) {
          try (Admin admin = connection.getAdmin()) {
            admin.shutdown();
          }
        }
        LOG.info("After call to shutdown master");
        cluster.waitOnMaster(MASTER_INDEX);
      } catch (Exception e) {
        // ignored: the master may already be down by the time we poll it
      }
    }
  };
  shutdownThread.start();
  LOG.info("Called master join on " + master.getName());
  master.join();
  shutdownThread.join();

  List<MasterThread> masterThreads = cluster.getMasters();
  // make sure all the masters properly shutdown
  assertEquals(0, masterThreads.size());

  util.shutdownMiniZKCluster();
  util.shutdownMiniDFSCluster();
  util.cleanupTestDir();
}
Example 9: testRSTermnationAfterRegisteringToMasterBeforeCreatingEphemeralNod
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
/**
 * Test verifies whether a region server is removed from the master's online
 * servers list if it goes down after registering with the master.
 * @throws Exception
 */
@Test(timeout = 180000)
public void testRSTermnationAfterRegisteringToMasterBeforeCreatingEphemeralNod() throws Exception {
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;
  firstRS.set(true);

  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);

  // Start the cluster
  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniDFSCluster(3);
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.createRootDir();
  final LocalHBaseCluster cluster =
      new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class, MockedRegionServer.class);
  final MasterThread master = cluster.getMasters().get(0);
  master.start();
  try {
    long startTime = System.currentTimeMillis();
    while (!master.getMaster().isActiveMaster()) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ignored) {
      }
      if (System.currentTimeMillis() > startTime + 30000) {
        throw new RuntimeException("Master not active after 30 seconds");
      }
    }
    masterActive = true;
    cluster.getRegionServers().get(0).start();
    cluster.getRegionServers().get(1).start();
    Thread.sleep(10000);
    List<ServerName> onlineServersList =
        master.getMaster().getServerManager().getOnlineServersList();
    while (onlineServersList.size() > 1) {
      Thread.sleep(100);
      onlineServersList = master.getMaster().getServerManager().getOnlineServersList();
    }
    // expected value first: exactly one server should remain online
    assertEquals(1, onlineServersList.size());
    cluster.shutdown();
  } finally {
    masterActive = false;
    firstRS.set(true);
    TEST_UTIL.shutdownMiniCluster();
  }
}
Example 10: testHDFSLinkReadDuringDelete
import org.apache.hadoop.hbase.HBaseTestingUtility; //import the package/class the method depends on
/**
 * Test that the link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a read goes past the cached block locations, a query to the
 * NameNode is performed using the filename, and the deleted file no longer
 * exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());
  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte) i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte) 0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte) 0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte) 1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte) 1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte) 2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte) 2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true); // expected: every backing file has been deleted
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}