

Java AppendTestUtil.initBuffer Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.AppendTestUtil.initBuffer. If you are wondering what AppendTestUtil.initBuffer does, how to use it, and what calling code looks like in practice, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.AppendTestUtil.


Fifteen code examples of AppendTestUtil.initBuffer are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
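
AppendTestUtil is a helper class in the Hadoop HDFS test sources, and initBuffer(int) returns a byte array of the requested size filled with seeded pseudo-random test data. This page does not show its implementation, so here is a minimal sketch of an equivalent helper; the class name and the fixed seed are assumptions for illustration, not the actual Hadoop code.

import java.util.Random;

// Sketch of a helper equivalent to AppendTestUtil.initBuffer.
// A fixed seed keeps the data deterministic across runs so that test
// failures are reproducible; the seed value itself is an assumption.
public final class TestBuffers {
  private static final long SEED = 0x12345678L; // assumed seed

  /** Returns a buffer of the given size filled with seeded random bytes. */
  public static byte[] initBuffer(int size) {
    byte[] buffer = new byte[size];
    new Random(SEED).nextBytes(buffer);
    return buffer;
  }
}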

Example 1: doTestReceiveAndMirror

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  
  pr.receiveNextPacket(in);
  
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  assertEquals(dataLen + checksumsLen + Ints.BYTES, header.getPacketLen());
  
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
    .write(Mockito.<byte[]>any(), Mockito.anyInt(),
        Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestPacketReceiver.java
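
Because doTestReceiveAndMirror is parameterized over the data and checksum lengths, a single @Test method can probe several packet sizes against one PacketReceiver. A hedged sketch of such a driver, living in the same test class as the helper above (the chosen lengths and the boolean constructor argument are assumptions):

// Hypothetical driver for doTestReceiveAndMirror; varying the lengths
// exercises the receiver's internal buffer reallocation.
@Test
public void testReceiveAndMirrorVariousSizes() throws IOException {
  PacketReceiver pr = new PacketReceiver(false); // assumed: heap buffers
  try {
    doTestReceiveAndMirror(pr, 100, 10);
    doTestReceiveAndMirror(pr, 50, 10);
    doTestReceiveAndMirror(pr, 150, 10);
  } finally {
    pr.close();
  }
}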

Example 2: testBasicTruncate

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  Path parent = new Path("/test");
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestFileTruncate.java
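
testBasicTruncate leans on a writeContents helper defined elsewhere in TestFileTruncate. Assuming it simply creates the file and writes the first fileLength bytes of the buffer, a minimal sketch (fs, BLOCK_SIZE, and REPLICATION are fields of the test class):

// Assumed shape of the writeContents helper: create the file with the
// test's block size and replication, write a prefix of the buffer, close.
static void writeContents(byte[] contents, int fileLength, Path p)
    throws IOException {
  FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE,
      (short) REPLICATION, BLOCK_SIZE);
  out.write(contents, 0, fileLength);
  out.close();
}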

Example 3: testTruncateWithDataNodesShutdownImmediately

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated in the middle (non copy-on-truncate);
 * the datanodes are shut down immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestFileTruncate.java

Example 4: testTruncateEditLogLoad

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * EditLogOp load test for Truncate.
 */
@Test
public void testTruncateEditLogLoad() throws IOException {
  // purge previously accumulated edits
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fs.saveNamespace();
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  final String s = "/testTruncateEditLogLoad";
  final Path p = new Path(s);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  writeContents(contents, startingFileSize, p);

  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.",
      isReady, is(false));

  cluster.restartNameNode();

  String holder = UserGroupInformation.getCurrentUser().getUserName();
  cluster.getNamesystem().recoverLease(s, holder, "");

  checkBlockRecovery(p);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestFileTruncate.java
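
When truncate returns false, these tests must wait for block recovery to finish before validating the file; that is what the checkBlockRecovery helper does. A plausible sketch, assuming fs is a DistributedFileSystem and using a simple poll loop with illustrative timeout values:

// Assumed shape of checkBlockRecovery: poll until lease/block recovery
// completes and the NameNode reports the file as closed.
static void checkBlockRecovery(Path p) throws IOException {
  final int attempts = 60;    // illustrative
  final long sleepMs = 500L;  // illustrative
  for (int i = 0; i < attempts; i++) {
    if (fs.isFileClosed(p)) {
      return;
    }
    try {
      Thread.sleep(sleepMs);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      break;
    }
  }
  fail("Block recovery did not complete for " + p);
}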

Example 5: testTruncate4Symlink

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  final Path parent = new Path("/test");
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestFileTruncate.java
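
The checkFullFile helper validates content after a truncate. Assuming it reads the file back and compares it byte for byte against a prefix of the original buffer, a sketch:

// Assumed behavior of checkFullFile: the file on disk must equal the
// first newLength bytes of the buffer the test originally wrote.
static void checkFullFile(Path p, int newLength, byte[] contents)
    throws IOException {
  byte[] actual = new byte[newLength];
  try (FSDataInputStream in = fs.open(p)) {
    in.readFully(0, actual);
  }
  for (int i = 0; i < newLength; i++) {
    assertEquals("Byte mismatch at offset " + i, contents[i], actual[i]);
  }
}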

Example 6: testHSyncBlockBoundary

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/** Test hsync on an exact block boundary */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // hsync again within the same block should not trigger another sync
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestHSync.java
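
checkSyncMetric is a local helper of TestHSync. Assuming it asserts on the DataNode's fsync counter through Hadoop's metrics test utilities (the "FsyncCount" metric name follows the DataNode metrics source but is an assumption here), a sketch:

// Assumed implementation of checkSyncMetric, using the static helpers
// assertCounter and getMetrics from org.apache.hadoop.test.MetricsAsserts.
private void checkSyncMetric(MiniDFSCluster cluster, long expected) {
  DataNode dn = cluster.getDataNodes().get(0);
  assertCounter("FsyncCount", expected, getMetrics(dn.getMetrics().name()));
}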

Example 7: testBasicTruncate

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;

  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
                                          fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);

      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);

      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }

      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage",
          cs.getSpaceConsumed(), newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 38, Source: TestFileTruncate.java

Example 8: testTruncateWithDataNodesShutdownImmediately

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated in the middle (non copy-on-truncate);
 * the datanodes are shut down immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");

  writeContents(contents, startingFileSize, p);

  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.shutdownDataNodes();
  cluster.setDataNodesDead();
  try {
    for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
      Thread.sleep(SLEEP);
    }
    assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
    LocatedBlocks blocks = getLocatedBlocks(p);
    assertTrue(blocks.isUnderConstruction());
  } finally {
    cluster.startDataNodes(conf, DATANODE_NUM, true,
        StartupOption.REGULAR, null);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  fs.delete(parent, true);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 36, Source: TestFileTruncate.java

Example 9: testTruncate4Symlink

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;

  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);

  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);

  final int newLength = fileLength/3;
  boolean isReady = fs.truncate(link, newLength);

  assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));

  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage",
      cs.getSpaceConsumed(), newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);

  fs.delete(parent, true);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 29, Source: TestFileTruncate.java

Example 10: doTestReceiveAndMirror

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
private void doTestReceiveAndMirror(PacketReceiver pr,
    int dataLen, int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  
  pr.receiveNextPacket(in);
  
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
    .write(Mockito.<byte[]>any(), Mockito.anyInt(),
        Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 36, Source: TestPacketReceiver.java

Example 11: doTestReceiveAndMirror

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
private void doTestReceiveAndMirror(PacketReceiver pr, int dataLen,
    int checksumsLen) throws IOException {
  final byte[] DATA = AppendTestUtil.initBuffer(dataLen);
  final byte[] CHECKSUMS = AppendTestUtil.initBuffer(checksumsLen);

  byte[] packet = prepareFakePacket(DATA, CHECKSUMS);
  ByteArrayInputStream in = new ByteArrayInputStream(packet);
  
  pr.receiveNextPacket(in);
  
  ByteBuffer parsedData = pr.getDataSlice();
  assertArrayEquals(DATA, remainingAsArray(parsedData));

  ByteBuffer parsedChecksums = pr.getChecksumSlice();
  assertArrayEquals(CHECKSUMS, remainingAsArray(parsedChecksums));
  
  PacketHeader header = pr.getHeader();
  assertEquals(SEQNO, header.getSeqno());
  assertEquals(OFFSET_IN_BLOCK, header.getOffsetInBlock());
  
  // Mirror the packet to an output stream and make sure it matches
  // the packet we sent.
  ByteArrayOutputStream mirrored = new ByteArrayOutputStream();
  mirrored = Mockito.spy(mirrored);

  pr.mirrorPacketTo(new DataOutputStream(mirrored));
  // The write should be done in a single call. Otherwise we may hit
  // nasty interactions with nagling (eg HDFS-4049).
  Mockito.verify(mirrored, Mockito.times(1))
      .write(Mockito.<byte[]>any(), Mockito.anyInt(),
          Mockito.eq(packet.length));
  Mockito.verifyNoMoreInteractions(mirrored);

  assertArrayEquals(packet, mirrored.toByteArray());
}
 
Developer: hopshadoop, Project: hops, Lines: 36, Source: TestPacketReceiver.java

Example 12: testHSyncBlockBoundary

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test hsync on an exact block boundary
 */
@Test
public void testHSyncBlockBoundary() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs = cluster.getFileSystem();
  
  final Path p = new Path("/testHSyncBlockBoundary/foo");
  final int len = 1 << 16;
  final byte[] fileContents = AppendTestUtil.initBuffer(len);
  FSDataOutputStream out = fs.create(p, FsPermission.getDefault(), EnumSet
      .of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
      4096, (short) 1, len, null);
  // fill exactly one block (tests the SYNC_BLOCK case) and flush
  out.write(fileContents, 0, len);
  out.hflush();
  // the full block should have caused a sync
  checkSyncMetric(cluster, 1);
  out.hsync();
  // hsync again within the same block should not trigger another sync
  checkSyncMetric(cluster, 1);
  // write one more byte and sync again
  out.write(1);
  out.hsync();
  checkSyncMetric(cluster, 2);
  out.close();
  checkSyncMetric(cluster, 3);
  cluster.shutdown();
}
 
Developer: hopshadoop, Project: hops, Lines: 32, Source: TestHSync.java

Example 13: testTruncateWithDataNodesRestart

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated in the middle (non copy-on-truncate);
 * dn0 is shut down before the truncate and restarted after it succeeds.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesRestart() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesRestart");

  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();

  int dn = 0;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  cluster.getDataNodes().get(dn).shutdown();
  try {
    boolean isReady = fs.truncate(p, newLength);
    assertFalse(isReady);
  } finally {
    cluster.restartDataNode(dn, true, true);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For non copy-on-truncate, the truncated block keeps the same block id,
   * but its generation stamp (GS) should increase.
   * The truncated block will be replicated to dn0 after it restarts.
   */
  assertEquals(newBlock.getBlock().getBlockId(), 
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);

  // Wait for the replica count to reach 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // Old replica is disregarded and replaced with the truncated one
  assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn, 
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));

  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);

  fs.delete(parent, true);
}
 
Developer: naver, Project: hadoop, Lines: 55, Source: TestFileTruncate.java
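
The restart tests above also assume a getLocatedBlocks helper. A plausible one-liner, fetching every block location for the file through the DFS client (assuming fs is a DistributedFileSystem):

// Assumed getLocatedBlocks helper: ask the client for all block
// locations of the file, from offset 0 to the end.
private LocatedBlocks getLocatedBlocks(Path src) throws IOException {
  return fs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE);
}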

Example 14: testCopyOnTruncateWithDataNodesRestart

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated in the middle (copy-on-truncate);
 * dn1 is shut down before the truncate and restarted after it succeeds.
 */
@Test(timeout=60000)
public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testCopyOnTruncateWithDataNodesRestart");

  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
  fs.allowSnapshot(parent);
  fs.createSnapshot(parent, "ss0");

  int dn = 1;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  cluster.getDataNodes().get(dn).shutdown();
  try {
    boolean isReady = fs.truncate(p, newLength);
    assertFalse(isReady);
  } finally {
    cluster.restartDataNode(dn, true, true);
    cluster.waitActive();
  }
  checkBlockRecovery(p);

  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For copy-on-truncate, a new block is created with a new block id and a
   * new generation stamp (GS). The new block starts with 2 replicas, so it
   * will then be replicated to dn1.
   */
  assertNotEquals(newBlock.getBlock().getBlockId(), 
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);

  // Wait for the replica count to reach 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // New block is replicated to dn1
  assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  // Old replica exists too since there is snapshot
  assertEquals(cluster.getBlockFile(dn, oldBlock.getBlock()).length(), 
      oldBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn, 
      oldBlock.getBlock()).getName().endsWith(
          oldBlock.getBlock().getGenerationStamp() + ".meta"));

  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);

  fs.deleteSnapshot(parent, "ss0");
  fs.delete(parent, true);
}
 
Developer: naver, Project: hadoop, Lines: 60, Source: TestFileTruncate.java

Example 15: testTruncateWithDataNodesRestartImmediately

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * The last block is truncated in the middle (non copy-on-truncate);
 * dn0 and dn1 are restarted immediately after the truncate.
 */
@Test(timeout=60000)
public void testTruncateWithDataNodesRestartImmediately() throws Exception {
  int startingFileSize = 3 * BLOCK_SIZE;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path parent = new Path("/test");
  final Path p = new Path(parent, "testTruncateWithDataNodesRestartImmediately");

  writeContents(contents, startingFileSize, p);
  LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();

  int dn0 = 0;
  int dn1 = 1;
  int toTruncateLength = 1;
  int newLength = startingFileSize - toTruncateLength;
  boolean isReady = fs.truncate(p, newLength);
  assertFalse(isReady);

  cluster.restartDataNode(dn0, true, true);
  cluster.restartDataNode(dn1, true, true);
  cluster.waitActive();
  checkBlockRecovery(p);

  LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
  /*
   * For non copy-on-truncate, the truncated block keeps the same block id,
   * but its generation stamp (GS) should increase.
   */
  assertEquals(newBlock.getBlock().getBlockId(), 
      oldBlock.getBlock().getBlockId());
  assertEquals(newBlock.getBlock().getGenerationStamp(),
      oldBlock.getBlock().getGenerationStamp() + 1);

  // Wait for the replica count to reach 3
  DFSTestUtil.waitReplication(fs, p, REPLICATION);
  // Old replica is disregarded and replaced with the truncated one on dn0
  assertEquals(cluster.getBlockFile(dn0, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn0, 
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));

  // Old replica is disregarded and replaced with the truncated one on dn1
  assertEquals(cluster.getBlockFile(dn1, newBlock.getBlock()).length(), 
      newBlock.getBlockSize());
  assertTrue(cluster.getBlockMetadataFile(dn1, 
      newBlock.getBlock()).getName().endsWith(
          newBlock.getBlock().getGenerationStamp() + ".meta"));

  // Validate the file
  FileStatus fileStatus = fs.getFileStatus(p);
  assertThat(fileStatus.getLen(), is((long) newLength));
  checkFullFile(p, newLength, contents);

  fs.delete(parent, true);
}
 
Developer: naver, Project: hadoop, Lines: 60, Source: TestFileTruncate.java


Note: The org.apache.hadoop.hdfs.AppendTestUtil.initBuffer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.