This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter. If you are wondering what NameNodeAdapter is for, or how and where to use it, the curated examples below should help.
The NameNodeAdapter class belongs to the org.apache.hadoop.hdfs.server.namenode package. Fifteen code examples of the class are shown below, ordered by popularity by default.
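Many of the examples below (Examples 5 and 8 through 12) share one recurring idiom: force a checkpoint by entering safe mode, saving the namespace, and leaving safe mode again. The following is a minimal, self-contained sketch of that idiom against a MiniDFSCluster; it assumes the hadoop-hdfs test artifacts (which provide NameNodeAdapter and MiniDFSCluster) are on the classpath, and the class name CheckpointIdiom is ours, not Hadoop's.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;

public class CheckpointIdiom {

  /** Forces a checkpoint: enter safe mode, write a new fsimage, leave safe mode. */
  static void forceCheckpoint(MiniDFSCluster cluster) throws Exception {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false); // quiesce the namespace
    NameNodeAdapter.saveNamespace(nameNode);        // persist it to a new fsimage
    NameNodeAdapter.leaveSafeMode(nameNode);        // resume normal operation
  }

  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
    try {
      forceCheckpoint(cluster);
      // A restart now reloads the namespace from the freshly written fsimage.
      cluster.restartNameNode(true);
    } finally {
      cluster.shutdown();
    }
  }
}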
Example 1: setUp
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL,
      "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();
  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();
  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
  FileSystem fs = dfsCluster.getFileSystem();
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
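One detail worth noting: NameNodeAdapter.getDtSecretManager(...) exposes the NameNode's delegation-token secret manager, and startThreads() starts its background key-rolling and token-expiry threads so that the test cluster can actually issue delegation tokens.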
Example 2: testGetBlockLocationsOnlyUsesReadLock
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
/**
 * Test that when access time updates are not needed, the FSNamesystem
 * write lock is not taken by getBlockLocations.
 * Regression test for HDFS-3981.
 */
@Test(timeout=60000)
public void testGetBlockLocationsOnlyUsesReadLock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 100*1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(0)
      .build();
  ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNamesystem());
  try {
    // Create an empty file in the FSN.
    Path p = new Path("/empty-file");
    DFSTestUtil.createFile(cluster.getFileSystem(), p, 0, (short)1, 0L);
    // getBlockLocations() should not need the write lock, since we just created
    // the file (and thus its access time is already within the 100-second
    // access time precision configured above).
    MockitoUtil.doThrowWhenCallStackMatches(
        new AssertionError("Should not need write lock"),
        ".*getBlockLocations.*")
        .when(spyLock).writeLock();
    cluster.getFileSystem().getFileBlockLocations(p, 0, 100);
  } finally {
    cluster.shutdown();
  }
}
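The interception above works because NameNodeAdapter.spyOnFsLock replaces the FSNamesystem lock with a Mockito spy, so a single method can be stubbed while everything else delegates to the real lock. Below is a minimal sketch of that underlying pattern with plain Mockito on an ordinary ReentrantReadWriteLock (the class name LockSpyDemo is illustrative, and Mockito must be on the classpath):

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockSpyDemo {
  public static void main(String[] args) {
    ReentrantReadWriteLock spyLock = spy(new ReentrantReadWriteLock());
    // Fail loudly if anyone asks for the write lock...
    doThrow(new AssertionError("write lock not expected"))
        .when(spyLock).writeLock();
    // ...while read-lock traffic still delegates to the real lock.
    spyLock.readLock().lock();
    spyLock.readLock().unlock();
    System.out.println("read path OK");
  }
}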
Example 3: assertCanStartHaNameNodes
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
private void assertCanStartHaNameNodes(String pathSuffix)
    throws ServiceFailedException, IOException, URISyntaxException,
    InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);
  // Make sure HA is working.
  cluster.getNameNode(0).getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(TEST_PATH, pathSuffix);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
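The final assertion queries the standby directly through NameNodeAdapter.getFileInfo (the trailing false disables symlink resolution) rather than through a client RPC, which a standby NameNode would reject; this confirms the mkdirs made it through the shared edit log to the second NameNode.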
Example 4: testEnterSafeModeInANNShouldNotThrowNPE
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
/**
 * Test entering safe mode on the active NameNode when it is already in
 * startup safe mode. Regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
      .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'",
      status.startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter safe mode on the active NameNode",
      namesystem.isInSafeMode());
  // Entering safe mode a second time must be a no-op, not an NPE.
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter safe mode on the active NameNode",
      namesystem.isInSafeMode());
}
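The two enterSafeMode calls are the point of the regression test: the first reproduces the HDFS-2747 scenario, manually entering safe mode while the restarted active is still in startup safe mode (which used to throw a NullPointerException), and the second verifies the operation is an idempotent no-op.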
Example 5: testBlocksDeletedInEditLog
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
/**
 * Regression test for a bug hit while developing HDFS-2742. The scenario:
 * - the image contains some blocks;
 * - the edit log contains at least one block addition, followed by the
 *   deletion of more blocks than were added;
 * - when the node starts up, incorrect accounting of block totals caused
 *   an assertion failure.
 */
@Test
public void testBlocksDeletedInEditLog() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  // Make 4 blocks persisted in the image.
  DFSTestUtil.createFile(fs, new Path("/test"),
      4*BLOCK_SIZE, (short) 3, 1L);
  // Checkpoint so the blocks land in the fsimage.
  NameNodeAdapter.enterSafeMode(nn0, false);
  NameNodeAdapter.saveNamespace(nn0);
  NameNodeAdapter.leaveSafeMode(nn0);
  // OP_ADD for 2 blocks
  DFSTestUtil.createFile(fs, new Path("/test2"),
      2*BLOCK_SIZE, (short) 3, 1L);
  // OP_DELETE for 4 blocks
  fs.delete(new Path("/test"), true);
  restartActive();
}
Example 6: createEmptyInProgressEditLog
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster,
    NameNode nn, boolean writeHeader) throws IOException {
  long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
  URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
  File sharedEditsDir = new File(sharedEditsUri.getPath());
  StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
  File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir,
      txid + 1);
  assertTrue("Failed to create in-progress edits file",
      inProgressFile.createNewFile());
  if (writeHeader) {
    DataOutputStream out = new DataOutputStream(new FileOutputStream(
        inProgressFile));
    EditLogFileOutputStream.writeHeader(
        NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
    out.close();
  }
}
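This helper simulates a NameNode that died right after opening a new log segment: an edits_inprogress_<txid> file exists in the shared edits directory, either completely empty or containing only the layout-version header, and edit-log recovery has to tolerate both. A hypothetical invocation would be createEmptyInProgressEditLog(cluster, cluster.getNameNode(0), false).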
Example 7: testMissingBlock
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
/** Test to ensure metrics reflect missing blocks. */
@Test
public void testMissingBlock() throws Exception {
  // Create a file with a single block with one replica.
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);
  // Corrupt the only replica of the block to produce a missing block.
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  updateMetrics();
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
Example 8: testReadSnapshotFileWithCheckpoint
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
  Path foo = new Path("/foo");
  hdfs.mkdirs(foo);
  hdfs.allowSnapshot(foo);
  Path bar = new Path("/foo/bar");
  DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
  hdfs.createSnapshot(foo, "s1");
  assertTrue(hdfs.delete(bar, true));
  // Checkpoint.
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  // Restart the NameNode to load the snapshot files from the fsimage.
  cluster.restartNameNode(true);
  String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
  DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
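Snapshot.getSnapshotPath builds the user-visible snapshot path; for the call above that is /foo/.snapshot/s1/bar. The read succeeds even though the live copy of bar was deleted before the checkpoint, which is exactly what the test verifies across a NameNode restart.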
Example 9: testWithCheckpoint
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
@Test
public void testWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  // Read the snapshot files after the restart.
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
Example 10: testFilesDeletionWithCheckpoint
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.delete(new Path("/test/test/test2"), true);
  fs.delete(new Path("/test/test/test3"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
  // Read the snapshot files after the restart.
  String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test2");
  DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
  String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
      "s1/test/test3");
  DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
Example 11: doTestMultipleSnapshots
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
private void doTestMultipleSnapshots(boolean saveNamespace)
    throws IOException {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  fs.createSnapshot(path, "s2");
  fs.delete(new Path("/test/test"), true);
  fs.deleteSnapshot(path, "s2");
  cluster.triggerBlockReports();
  if (saveNamespace) {
    NameNode nameNode = cluster.getNameNode();
    NameNodeAdapter.enterSafeMode(nameNode, false);
    NameNodeAdapter.saveNamespace(nameNode);
    NameNodeAdapter.leaveSafeMode(nameNode);
  }
  cluster.restartNameNode(true);
}
Example 12: testOpenFilesWithRename
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
@Test
public void testOpenFilesWithRename() throws Exception {
  Path path = new Path("/test");
  doWriteAndAbort(fs, path);
  // Check for zero-sized blocks.
  Path fileWithEmptyBlock = new Path("/test/test/test4");
  fs.create(fileWithEmptyBlock);
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  String clientName = fs.getClient().getClientName();
  // Create one empty block.
  nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
      INodeId.GRANDFATHER_INODE_ID, null);
  fs.createSnapshot(path, "s2");
  fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
  fs.delete(new Path("/test/test-renamed"), true);
  NameNode nameNode = cluster.getNameNode();
  NameNodeAdapter.enterSafeMode(nameNode, false);
  NameNodeAdapter.saveNamespace(nameNode);
  NameNodeAdapter.leaveSafeMode(nameNode);
  cluster.restartNameNode(true);
}
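Two details are easy to miss here: addBlock is invoked directly on the NameNode RPC interface so that a zero-length block can be attached to the still-open file, and INodeId.GRANDFATHER_INODE_ID is the sentinel inode id used on behalf of clients that identify files by path only, so the NameNode resolves the target by its path string.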
Example 13: testSufficientlySingleReplBlockUsesNewRack
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 1;
  final Path filePath = new Path("/testFile");
  String[] racks = {"/rack1", "/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block with a replication factor of 1.
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
    // Raising the replication factor should place the new replica on a new rack.
    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
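The rack layout is the crux: three datanodes on /rack1 and one on /rack2. With a replication factor of 1 the single replica may sit on either rack, but once setReplication raises the factor to 2, the default block placement policy must spread replicas across racks, so the final waitForReplication call expects the block on two racks with two replicas.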
Example 14: testExcessBlocks
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
/** Create excess blocks by reducing the replication factor of a file
 * and ensure metrics reflect it.
 */
@Test
public void testExcessBlocks() throws Exception {
  Path file = getTestPath("testExcessBlocks");
  createFile(file, 100, (short)2);
  NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("ExcessBlocks", 1L, rb);
  // Verify the ExcessBlocks metric is decremented and
  // excessReplicateMap is cleared after deleting a file.
  fs.delete(file, true);
  rb = getMetrics(NS_METRICS);
  assertGauge("ExcessBlocks", 0L, rb);
  assertTrue(bm.excessReplicateMap.isEmpty());
}
Example 15: testMissingBlock
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; //import the required package/class
/** Test to ensure metrics reflect missing blocks. */
@Test
public void testMissingBlock() throws Exception {
  // Create a file with a single block with one replica.
  Path file = getTestPath("testMissingBlocks");
  createFile(file, 100, (short)1);
  // Corrupt the only replica of the block to produce a missing block.
  LocatedBlock block = NameNodeAdapter.getBlockLocations(
      cluster.getNameNode(), file.toString(), 0, 1).get(0);
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
        "STORAGE_ID", "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  Thread.sleep(1000); // Wait for the block to be marked corrupt.
  MetricsRecordBuilder rb = getMetrics(NS_METRICS);
  assertGauge("UnderReplicatedBlocks", 1L, rb);
  assertGauge("MissingBlocks", 1L, rb);
  assertGauge("MissingReplOneBlocks", 1L, rb);
  fs.delete(file, true);
  waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}