This article collects and summarizes typical usages of the Java method org.apache.hadoop.hdfs.DFSTestUtil.waitReplication. If you are unsure what DFSTestUtil.waitReplication does, how to call it, or where to find usage examples, the hand-picked code samples below may help. You can also read more about the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.
The following presents 15 code examples of the DFSTestUtil.waitReplication method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
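Before the examples, here is a minimal sketch of the pattern almost all of them follow: create a test file with DFSTestUtil.createFile, then call DFSTestUtil.waitReplication to block until the NameNode reports the expected number of replicas (it throws TimeoutException if that replication factor is not reached in time). The class name WaitReplicationSketch and the single-test-cluster setup are illustrative assumptions for this sketch only, not part of any example on this page.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitReplicationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Illustrative 3-DataNode mini cluster; the tests below build theirs in setUp()/@Before.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      Path file = new Path("/waitReplicationDemo");
      // Write a 1 KB file with replication factor 3 and a fixed seed.
      DFSTestUtil.createFile(fs, file, 1024, (short) 3, 1L);
      // Block until every block of the file is reported with 3 replicas;
      // throws TimeoutException if replication does not converge in time.
      DFSTestUtil.waitReplication(fs, file, (short) 3);
    } finally {
      cluster.shutdown();
    }
  }
}
Note that several examples below intentionally wait for a lower factor than the one requested at create time (for example, createFile with (short)3 but waitReplication with (short)2) to assert that only the surviving DataNodes hold replicas after an injected volume failure.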
Example 1: testConfigureMinValidVolumes
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
* Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
* option, ie the DN shuts itself down when the number of failures
* experienced drops below the tolerated amount.
*/
@Test
public void testConfigureMinValidVolumes() throws Exception {
assumeTrue(!System.getProperty("os.name").startsWith("Windows"));
// Bring up two additional datanodes that need both of their volumes
// functioning in order to stay up.
conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail a volume on the 2nd DN
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
// Should only get two replicas (the first DN and the 3rd)
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)2);
// Check that this single failure caused a DN to die.
DFSTestUtil.waitForDatanodeStatus(dm, 2, 1, 0,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
// If we restore the volume we should still only be able to get
// two replicas since the DN is still considered dead.
DataNodeTestUtils.restoreDataDirFromFailure(dn2Vol1);
Path file2 = new Path("/test2");
DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
DFSTestUtil.waitReplication(fs, file2, (short)2);
}
Example 2: testSetrepIncWithUnderReplicatedBlocks
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test(timeout=60000) // 1 min timeout
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
Configuration conf = new HdfsConfiguration();
final short REPLICATION_FACTOR = 2;
final String FILE_NAME = "/testFile";
final Path FILE_PATH = new Path(FILE_NAME);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
try {
// create a file with one block with a replication factor of 2
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
// remove one replica from the blocksMap so block becomes under-replicated
// but the block does not get put into the under-replicated blocks queue
final BlockManager bm = cluster.getNamesystem().getBlockManager();
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
.iterator().next().getDatanodeDescriptor();
bm.addToInvalidates(b.getLocalBlock(), dn);
Thread.sleep(5000);
bm.blocksMap.removeNode(b.getLocalBlock(), dn);
// increment this file's replication factor
FsShell shell = new FsShell(conf);
assertEquals(0, shell.run(new String[]{
"-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
} finally {
cluster.shutdown();
}
}
Example 3: createFile
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/** create a file with a length of <code>len</code> */
private void createFile(int index, long len
) throws IOException, InterruptedException, TimeoutException {
final FileSystem fs = cluster.getFileSystem(index);
DFSTestUtil.createFile(fs, FILE_PATH, len, replication, RANDOM.nextLong());
DFSTestUtil.waitReplication(fs, FILE_PATH, replication);
}
Example 4: testVolFailureStatsPreservedOnNNRestart
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
* Test that the NN re-learns about volume failures after a restart.
*/
@Test
public void testVolFailureStatsPreservedOnNNRestart() throws Exception {
// Bring up two more datanodes that can tolerate 1 failure
cluster.startDataNodes(conf, 2, true, null, null);
cluster.waitActive();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);
// Fail the first volume on both datanodes (we have to keep the
// third healthy so one node in the pipeline will not fail).
File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
Path file1 = new Path("/test1");
DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
DFSTestUtil.waitReplication(fs, file1, (short)2);
ArrayList<DataNode> dns = cluster.getDataNodes();
// The NN reports two volume failures
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 2);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
// After restarting the NN it still sees the two failures
cluster.restartNameNode(0);
cluster.waitActive();
DFSTestUtil.waitForDatanodeStatus(dm, 3, 0, 2,
origCapacity - (1*dnCapacity), WAIT_FOR_HEARTBEATS);
checkAggregateFailuresAtNameNode(true, 2);
checkFailuresAtNameNode(dm, dns.get(0), true, dn1Vol1.getAbsolutePath());
checkFailuresAtNameNode(dm, dns.get(1), true, dn2Vol1.getAbsolutePath());
}
Example 5: testFsckError
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/** Test if fsck can return -1 in case of failure
*
* @throws Exception
*/
@Test
public void testFsckError() throws Exception {
MiniDFSCluster cluster = null;
try {
// bring up a one-node cluster
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
String fileName = "/test.txt";
Path filePath = new Path(fileName);
FileSystem fs = cluster.getFileSystem();
// create a one-block file
DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
DFSTestUtil.waitReplication(fs, filePath, (short)1);
// intentionally corrupt NN data structure
INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
(fileName, true);
final BlockInfoContiguous[] blocks = node.getBlocks();
assertEquals(blocks.length, 1);
blocks[0].setNumBytes(-1L); // set the block length to be negative
// run fsck and expect a failure with -1 as the error code
String outStr = runFsck(conf, -1, true, fileName);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
// clean up file system
fs.delete(filePath, true);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
Example 6: createFile
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
short replicationFactor, int nnIndex)
throws IOException, InterruptedException, TimeoutException {
FileSystem fs = cluster.getFileSystem(nnIndex);
DFSTestUtil.createFile(fs, filePath, fileLen,
replicationFactor, r.nextLong());
DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
}
Example 7: testFsckSymlink
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/** Test fsck with symlinks in the filesystem */
@Test
public void testFsckSymlink() throws Exception {
final DFSTestUtil util = new DFSTestUtil.Builder().
setName(getClass().getSimpleName()).setNumFiles(1).build();
final Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs = cluster.getFileSystem();
final String fileName = "/srcdat";
util.createFiles(fs, fileName);
final FileContext fc = FileContext.getFileContext(
cluster.getConfiguration(0));
final Path file = new Path(fileName);
final Path symlink = new Path("/srcdat-symlink");
fc.createSymlink(file, symlink, false);
util.waitReplication(fs, fileName, (short)3);
long aTime = fc.getFileStatus(symlink).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
String outStr = runFsck(conf, 0, true, "/");
verifyAuditLogs();
assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertTrue(outStr.contains("Total symlinks:\t\t1"));
util.cleanup(fs, fileName);
} finally {
if (fs != null) {try{fs.close();} catch(Exception e){}}
if (cluster != null) { cluster.shutdown(); }
}
}
Example 8: testWhenDecreasingReplication
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
* The corrupt block has to be removed when the number of valid replicas
* matches the replication factor for the file. Here the above condition is
* tested by reducing the replication factor.
* The test strategy :
* Bring up Cluster with 3 DataNodes
* Create a file of replication factor 3
* Corrupt one replica of a block of the file
* Verify that there are still 2 good replicas and 1 corrupt replica
* (corrupt replica should not be removed since number of good
* replicas (2) is less than replication factor (3))
* Set the replication factor to 2
* Verify that the corrupt replica is removed.
* (the corrupt replica should now be removed since the number of good
* replicas (2) equals the replication factor (2))
*/
@Test
public void testWhenDecreasingReplication() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
namesystem.setReplication(fileName.toString(), (short) 2);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
Example 9: testReplicationAdjusted
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
* Test that, if the NN restarts with a new minimum replication,
* any files created with the old replication count will get
* automatically bumped up to the new minimum upon restart.
*/
@Test
public void testReplicationAdjusted() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
// Replicate and heartbeat fast to shave a few seconds off test
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// Create a file with replication count 1
Path p = new Path("/testfile");
DFSTestUtil.createFile(fs, p, 10, /*repl*/ (short)1, 1);
DFSTestUtil.waitReplication(fs, p, (short)1);
// Shut down and restart cluster with new minimum replication of 2
cluster.shutdown();
cluster = null;
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(false).build();
cluster.waitActive();
fs = cluster.getFileSystem();
// The file should get adjusted to replication 2 when
// the edit log is replayed.
DFSTestUtil.waitReplication(fs, p, (short)2);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
Example 10: createFile
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
private static void createFile(Suite s, int index, long len
) throws IOException, InterruptedException, TimeoutException {
final FileSystem fs = s.cluster.getFileSystem(index);
DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
}
Example 11: testClientMmapDisable
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void testClientMmapDisable() throws Exception {
HdfsConfiguration conf = initZeroCopyTest();
conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final int TEST_FILE_LENGTH = 16385;
final int RANDOM_SEED = 23453;
final String CONTEXT = "testClientMmapDisable";
FSDataInputStream fsIn = null;
DistributedFileSystem fs = null;
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
try {
// With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
// mapped reads.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
fsIn = fs.open(TEST_PATH);
try {
fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.fail("expected zero-copy read to fail when client mmaps " +
"were disabled.");
} catch (UnsupportedOperationException e) {
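// expected: zero-copy reads should fail while client mmaps are disabled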
}
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
fsIn = null;
fs = null;
cluster = null;
try {
// Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0. It should work.
conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,
TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
fsIn = fs.open(TEST_PATH);
ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
fsIn.releaseBuffer(buf);
// Test EOF behavior
IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(null, buf);
} finally {
if (fsIn != null) fsIn.close();
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
}
}
Example 12: test2GBMmapLimit
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
@Test
public void test2GBMmapLimit() throws Exception {
Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
HdfsConfiguration conf = initZeroCopyTest();
final long TEST_FILE_LENGTH = 2469605888L;
conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TEST_FILE_LENGTH);
MiniDFSCluster cluster = null;
final Path TEST_PATH = new Path("/a");
final String CONTEXT = "test2GBMmapLimit";
conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
FSDataInputStream fsIn = null, fsIn2 = null;
ByteBuffer buf1 = null, buf2 = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 0xB);
DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
fsIn = fs.open(TEST_PATH);
buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(1, buf1.remaining());
fsIn.releaseBuffer(buf1);
buf1 = null;
fsIn.seek(2147483640L);
buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(7, buf1.remaining());
Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
fsIn.releaseBuffer(buf1);
buf1 = null;
Assert.assertEquals(2147483647L, fsIn.getPos());
try {
buf1 = fsIn.read(null, 1024,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.fail("expected UnsupportedOperationException");
} catch (UnsupportedOperationException e) {
// expected; can't read past 2GB boundary.
}
fsIn.close();
fsIn = null;
// Now create another file with normal-sized blocks, and verify we
// can read past 2GB
final Path TEST_PATH2 = new Path("/b");
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH,
268435456L, (short)1, 0xA);
fsIn2 = fs.open(TEST_PATH2);
fsIn2.seek(2147483640L);
buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(8, buf2.remaining());
Assert.assertEquals(2147483648L, fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2 = null;
buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
Assert.assertEquals(1024, buf2.remaining());
Assert.assertEquals(2147484672L, fsIn2.getPos());
fsIn2.releaseBuffer(buf2);
buf2 = null;
} finally {
if (buf1 != null) {
fsIn.releaseBuffer(buf1);
}
if (buf2 != null) {
fsIn2.releaseBuffer(buf2);
}
IOUtils.cleanup(null, fsIn, fsIn2);
if (cluster != null) {
cluster.shutdown();
}
}
}
Example 13: testTruncateWithDataNodesRestart
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
* The last block is truncated in the middle. (non copy-on-truncate)
* dn0 is shut down before the truncate and restarted after the truncate succeeds.
*/
@Test(timeout=60000)
public void testTruncateWithDataNodesRestart() throws Exception {
int startingFileSize = 3 * BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path parent = new Path("/test");
final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
writeContents(contents, startingFileSize, p);
LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
int dn = 0;
int toTruncateLength = 1;
int newLength = startingFileSize - toTruncateLength;
cluster.getDataNodes().get(dn).shutdown();
try {
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
} finally {
cluster.restartDataNode(dn, true, true);
cluster.waitActive();
}
checkBlockRecovery(p);
LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
/*
* For non copy-on-truncate, the truncated block id is the same, but the
* GS should increase.
* The truncated block will be replicated to dn0 after it restarts.
*/
assertEquals(newBlock.getBlock().getBlockId(),
oldBlock.getBlock().getBlockId());
assertEquals(newBlock.getBlock().getGenerationStamp(),
oldBlock.getBlock().getGenerationStamp() + 1);
// Wait for the replica count to come back to 3
DFSTestUtil.waitReplication(fs, p, REPLICATION);
// Old replica is disregarded and replaced with the truncated one
assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
newBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn,
newBlock.getBlock()).getName().endsWith(
newBlock.getBlock().getGenerationStamp() + ".meta"));
// Validate the file
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
checkFullFile(p, newLength, contents);
fs.delete(parent, true);
}
Example 14: testCopyOnTruncateWithDataNodesRestart
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
* The last block is truncated in the middle. (copy-on-truncate)
* dn1 is shut down before the truncate and restarted after the truncate succeeds.
*/
@Test(timeout=60000)
public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
int startingFileSize = 3 * BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path parent = new Path("/test");
final Path p = new Path(parent, "testCopyOnTruncateWithDataNodesRestart");
writeContents(contents, startingFileSize, p);
LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
fs.allowSnapshot(parent);
fs.createSnapshot(parent, "ss0");
int dn = 1;
int toTruncateLength = 1;
int newLength = startingFileSize - toTruncateLength;
cluster.getDataNodes().get(dn).shutdown();
try {
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
} finally {
cluster.restartDataNode(dn, true, true);
cluster.waitActive();
}
checkBlockRecovery(p);
LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
/*
* For copy-on-truncate, a new block is created with a new block id and a new GS.
* The new block initially has 2 replicas, so it will then be replicated to dn1.
*/
assertNotEquals(newBlock.getBlock().getBlockId(),
oldBlock.getBlock().getBlockId());
assertEquals(newBlock.getBlock().getGenerationStamp(),
oldBlock.getBlock().getGenerationStamp() + 1);
// Wait for the replica count to come back to 3
DFSTestUtil.waitReplication(fs, p, REPLICATION);
// New block is replicated to dn1
assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
newBlock.getBlockSize());
// Old replica exists too since there is snapshot
assertEquals(cluster.getBlockFile(dn, oldBlock.getBlock()).length(),
oldBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn,
oldBlock.getBlock()).getName().endsWith(
oldBlock.getBlock().getGenerationStamp() + ".meta"));
// Validate the file
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
checkFullFile(p, newLength, contents);
fs.deleteSnapshot(parent, "ss0");
fs.delete(parent, true);
}
Example 15: testTruncateWithDataNodesRestartImmediately
import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class this method depends on
/**
* The last block is truncated in the middle. (non copy-on-truncate)
* dn0 and dn1 are restarted immediately after the truncate.
*/
@Test(timeout=60000)
public void testTruncateWithDataNodesRestartImmediately() throws Exception {
int startingFileSize = 3 * BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path parent = new Path("/test");
final Path p = new Path(parent, "testTruncateWithDataNodesRestartImmediately");
writeContents(contents, startingFileSize, p);
LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
int dn0 = 0;
int dn1 = 1;
int toTruncateLength = 1;
int newLength = startingFileSize - toTruncateLength;
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
cluster.restartDataNode(dn0, true, true);
cluster.restartDataNode(dn1, true, true);
cluster.waitActive();
checkBlockRecovery(p);
LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
/*
* For non copy-on-truncate, the truncated block id is the same, but the
* GS should increase.
*/
assertEquals(newBlock.getBlock().getBlockId(),
oldBlock.getBlock().getBlockId());
assertEquals(newBlock.getBlock().getGenerationStamp(),
oldBlock.getBlock().getGenerationStamp() + 1);
// Wait for the replica count to come back to 3
DFSTestUtil.waitReplication(fs, p, REPLICATION);
// Old replica is disregarded and replaced with the truncated one on dn0
assertEquals(cluster.getBlockFile(dn0, newBlock.getBlock()).length(),
newBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn0,
newBlock.getBlock()).getName().endsWith(
newBlock.getBlock().getGenerationStamp() + ".meta"));
// Old replica is disregarded and replaced with the truncated one on dn1
assertEquals(cluster.getBlockFile(dn1, newBlock.getBlock()).length(),
newBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn1,
newBlock.getBlock()).getName().endsWith(
newBlock.getBlock().getGenerationStamp() + ".meta"));
// Validate the file
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
checkFullFile(p, newLength, contents);
fs.delete(parent, true);
}