本文整理汇总了Java中org.apache.hadoop.hdfs.tools.DFSAdmin.run方法的典型用法代码示例。如果您正苦于以下问题:Java DFSAdmin.run方法的具体用法?Java DFSAdmin.run怎么用?Java DFSAdmin.run使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hdfs.tools.DFSAdmin
的用法示例。
在下文中一共展示了DFSAdmin.run方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testMultipleRegistration
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testMultipleRegistration() throws Exception {
  // Two handlers registered under the same identifier; one refresh must reach both.
  RefreshRegistry.defaultRegistry().register("sharedId", firstHandler);
  RefreshRegistry.defaultRegistry().register("sharedId", secondHandler);

  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String[] refreshArgs = {
      "-refresh", "localhost:" + cluster.getNameNodePort(), "sharedId", "one"};
  int rc = dfsAdmin.run(refreshArgs);
  // -1 because one of the responses is unregistered
  assertEquals(-1, rc);

  // Both handlers must have been invoked with the trailing argument.
  Mockito.verify(firstHandler).handleRefresh("sharedId", new String[]{"one"});
  Mockito.verify(secondHandler).handleRefresh("sharedId", new String[]{"one"});

  RefreshRegistry.defaultRegistry().unregisterAll("sharedId");
}
示例2: testExceptionResultsInNormalError
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testExceptionResultsInNormalError() throws Exception {
  // In this test, we ensure that all handlers are called even if we throw an
  // exception in one, and that the command still exits with -1.
  // NOTE: Mockito.stub(...).toThrow(...) is deprecated; use the equivalent
  // when(...).thenThrow(...) instead.
  RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.when(exceptionalHandler.handleRefresh(
          Mockito.anyString(), Mockito.any(String[].class)))
      .thenThrow(new RuntimeException("Exceptional Handler Throws Exception"));

  RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.when(otherExceptionalHandler.handleRefresh(
          Mockito.anyString(), Mockito.any(String[].class)))
      .thenThrow(new RuntimeException("More Exceptions"));

  RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
  RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);

  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[]{"-refresh", "localhost:" +
      cluster.getNameNodePort(), "exceptional"};
  int exitCode = admin.run(args);
  assertEquals(-1, exitCode); // Exceptions result in a -1

  // Both handlers were called despite the first one throwing.
  Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[]{});
  Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[]{});

  RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
}
示例3: testRefresh
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testRefresh() throws Exception {
  assertTrue("Mock queue should have been constructed", mockQueueConstructions > 0);
  assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
  int constructionsBeforeRefresh = mockQueueConstructions;

  // Swap the call queue for the one named in core-site.xml (a LinkedBlockingQueue).
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int rc = dfsAdmin.run(new String[]{"-refreshCallQueue"});
  assertEquals("DFSAdmin should return 0", 0, rc);
  assertEquals("Mock queue should have no additional constructions",
      constructionsBeforeRefresh, mockQueueConstructions);

  try {
    assertFalse("Puts are routed through LBQ instead of MockQueue",
        canPutInMockQueue());
  } catch (IOException ioe) {
    fail("Could not put into queue at all");
  }
}
示例4: runGetBalancerBandwidthCmd
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
/**
 * Runs a getBalancerBandwidth command and asserts that the expected bandwidth
 * value is printed to stdout.
 */
private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
    ClientDatanodeProtocol proxy, long expectedBandwidth) throws Exception {
  // Capture stdout while the command runs so the printed message can be checked.
  PrintStream savedStdOut = System.out;
  outContent.reset();
  try {
    System.setOut(outStream);
    assertEquals("DFSAdmin should return 0", 0, admin.run(args));
    String expectedMsg = "Balancer bandwidth is " + expectedBandwidth
        + " bytes per second.";
    String captured = new String(outContent.toByteArray(), UTF8);
    assertTrue("Wrong balancer bandwidth!", captured.contains(expectedMsg));
  } finally {
    System.setOut(savedStdOut); // always restore stdout
  }
}
示例5: testGroupMappingRefresh
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testGroupMappingRefresh() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[]{"-refreshUserToGroupsMappings"};
  Groups groups = Groups.getUserToGroupsMappingService(config);
  String user = UserGroupInformation.getCurrentUser().getUserName();

  System.out.println("first attempt:");
  List<String> g1 = groups.getGroups(user);
  // BUG FIX: the original reused an array sized for g1 via List.toArray(array);
  // when a later list had a different size the array kept stale entries (or the
  // freshly allocated array was discarded), so the diagnostics printed wrong
  // contents. Take a fresh snapshot array for each list instead.
  String[] str_groups = g1.toArray(new String[0]);
  System.out.println(Arrays.toString(str_groups));

  // Without a refresh, the cached mapping is returned unchanged.
  System.out.println("second attempt, should be same:");
  List<String> g2 = groups.getGroups(user);
  str_groups = g2.toArray(new String[0]);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g2.size(); i++) {
    assertEquals("Should be same group ", g1.get(i), g2.get(i));
  }

  // An explicit refresh must invalidate the cache.
  admin.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  List<String> g3 = groups.getGroups(user);
  str_groups = g3.toArray(new String[0]);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g3.size(); i++) {
    assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i),
        g1.get(i).equals(g3.get(i)));
  }

  // Let the cached mapping expire (10% margin over the configured timeout).
  Thread.sleep(groupRefreshTimeoutSec * 1100);
  System.out.println("fourth attempt(after timeout), should be different:");
  List<String> g4 = groups.getGroups(user);
  str_groups = g4.toArray(new String[0]);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g4.size(); i++) {
    assertFalse("Should be different group ", g3.get(i).equals(g4.get(i)));
  }
}
示例6: testInvalidCommand
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testInvalidCommand() throws Exception {
  // "-refresh nn" is missing the identifier argument, so the command must fail.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  int rc = dfsAdmin.run(new String[]{"-refresh", "nn"});
  assertEquals("DFSAdmin should fail due to bad args", -1, rc);
}
示例7: testInvalidIdentifier
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testInvalidIdentifier() throws Exception {
  // Refreshing an identifier that has no registered handler must fail.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String[] refreshArgs = {"-refresh",
      "localhost:" + cluster.getNameNodePort(), "unregisteredIdentity"};
  assertEquals("DFSAdmin should fail due to no handler registered",
      -1, dfsAdmin.run(refreshArgs));
}
示例8: testValidIdentifier
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testValidIdentifier() throws Exception {
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String[] refreshArgs = {"-refresh",
      "localhost:" + cluster.getNameNodePort(), "firstHandler"};
  assertEquals("DFSAdmin should succeed", 0, dfsAdmin.run(refreshArgs));

  // Only the handler registered under "firstHandler" may be invoked.
  Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
  // Second handler was never called
  Mockito.verify(secondHandler, Mockito.never())
      .handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
}
示例9: testVariableArgs
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testVariableArgs() throws Exception {
  // The handler's return code becomes the exit code, and every argument after
  // the identifier is forwarded to the handler verbatim.
  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String hostPort = "localhost:" + cluster.getNameNodePort();

  int rc = dfsAdmin.run(
      new String[]{"-refresh", hostPort, "secondHandler", "one"});
  assertEquals("DFSAdmin should return 2", 2, rc);

  rc = dfsAdmin.run(
      new String[]{"-refresh", hostPort, "secondHandler", "one", "two"});
  assertEquals("DFSAdmin should now return 3", 3, rc);

  Mockito.verify(secondHandler).handleRefresh("secondHandler",
      new String[]{"one"});
  Mockito.verify(secondHandler).handleRefresh("secondHandler",
      new String[]{"one", "two"});
}
示例10: testUnregistration
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testUnregistration() throws Exception {
  // Once the handler is removed, refreshing its identifier must fail.
  RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");

  DFSAdmin dfsAdmin = new DFSAdmin(config);
  String[] refreshArgs = {"-refresh",
      "localhost:" + cluster.getNameNodePort(), "firstHandler"};
  assertEquals("DFSAdmin should return -1", -1, dfsAdmin.run(refreshArgs));
}
示例11: testMultipleReturnCodeMerging
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
@Test
public void testMultipleReturnCodeMerging() throws Exception {
  // Two handlers which return two non-zero values.
  // NOTE: Mockito.stub(...).toReturn(...) is deprecated; use the equivalent
  // when(...).thenReturn(...) instead.
  RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
  Mockito.when(handlerOne.handleRefresh(
          Mockito.anyString(), Mockito.any(String[].class)))
      .thenReturn(new RefreshResponse(23, "Twenty Three"));

  RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
  Mockito.when(handlerTwo.handleRefresh(
          Mockito.anyString(), Mockito.any(String[].class)))
      .thenReturn(new RefreshResponse(10, "Ten"));

  // Then registered to the same ID
  RefreshRegistry.defaultRegistry().register("shared", handlerOne);
  RefreshRegistry.defaultRegistry().register("shared", handlerTwo);

  // We refresh both
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[]{"-refresh", "localhost:" +
      cluster.getNameNodePort(), "shared"};
  int exitCode = admin.run(args);
  assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes

  // Verify we called both
  Mockito.verify(handlerOne).handleRefresh("shared", new String[]{});
  Mockito.verify(handlerTwo).handleRefresh("shared", new String[]{});

  RefreshRegistry.defaultRegistry().unregisterAll("shared");
}
示例12: testInvalidShell
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
/**
 * default setting is file:// which is not a DFS
 * so DFSAdmin should throw and catch InvalidArgumentException
 * and return -1 exit code.
 * @throws Exception
 */
@Test (timeout = 30000)
public void testInvalidShell() throws Exception {
  Configuration conf = new Configuration(); // default FS (non-DFS)
  DFSAdmin admin = new DFSAdmin();
  admin.setConf(conf);
  int res = admin.run(new String[] {"-refreshNodes"});
  // BUG FIX: JUnit's assertEquals signature is (message, expected, actual);
  // the original passed (message, actual, expected), which would produce a
  // misleading failure message.
  assertEquals("expected to fail -1", -1, res);
}
示例13: runFetchImage
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
/**
 * Run `hdfs dfsadmin -fetchImage ...' and verify that the downloaded image is
 * correct.
 */
private static void runFetchImage(DFSAdmin dfsAdmin, MiniDFSCluster cluster)
    throws Exception {
  assertEquals(0, dfsAdmin.run(
      new String[]{"-fetchImage", FETCHED_IMAGE_FILE.getPath()}));

  // Compare the MD5 of the fetched copy against the newest fsimage on the NN.
  File newestImage = getHighestFsImageOnCluster(cluster);
  MD5Hash want = MD5FileUtils.computeMd5ForFile(newestImage);
  MD5Hash got = MD5FileUtils.computeMd5ForFile(
      new File(FETCHED_IMAGE_FILE, newestImage.getName()));
  assertEquals(want, got);
}
示例14: runCommand
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
/**
 * Runs the given DFSAdmin command and checks its exit code.
 *
 * @param admin the DFSAdmin instance to run the command with
 * @param args command-line arguments passed to {@code DFSAdmin.run}
 * @param expectError true if the command must fail with -1,
 *                    false if it must succeed with a non-negative code
 * @throws Exception if the command itself throws
 */
private void runCommand(DFSAdmin admin, String[] args, boolean expectError)
    throws Exception {
  int val = admin.run(args);
  if (expectError) {
    // BUG FIX: assertEquals takes (expected, actual); the original had them
    // swapped, which yields a misleading failure message.
    assertEquals(-1, val);
  } else {
    assertTrue(val >= 0);
  }
}
示例15: runCommand
import org.apache.hadoop.hdfs.tools.DFSAdmin; //导入方法依赖的package包/类
/**
 * Runs the given DFSAdmin command and checks its exit code.
 *
 * @param admin the DFSAdmin instance to run the command with
 * @param args command-line arguments passed to {@code DFSAdmin.run}
 * @param expectError true if the command must fail with -1,
 *                    false if it must succeed with a non-negative code
 * @throws Exception if the command itself throws
 */
private void runCommand(DFSAdmin admin, String[] args, boolean expectError)
    throws Exception {
  int val = admin.run(args);
  if (expectError) {
    // BUG FIX: assertEquals takes (expected, actual); the original had them
    // swapped, which yields a misleading failure message.
    assertEquals(-1, val);
  } else {
    assertTrue(val >= 0);
  }
}