

Java DFSTestUtil.appendFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.appendFile. If you are wondering exactly what DFSTestUtil.appendFile does or how to call it, the hand-picked code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


The following presents 14 code examples of the DFSTestUtil.appendFile method, sorted by popularity by default.
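Before the examples, here is a minimal sketch of how DFSTestUtil.appendFile is typically exercised against a MiniDFSCluster in a unit test. The path /demo/file and the concrete lengths are placeholder values chosen for illustration; they are not taken from any of the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AppendFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Start a single-DataNode mini cluster just for this demonstration.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path file = new Path("/demo/file"); // placeholder test path
      // Create a 1 KB file with replication factor 1 and seed 0.
      DFSTestUtil.createFile(fs, file, 1024L, (short) 1, 0L);
      // Append 512 bytes of generated data via the int overload ...
      DFSTestUtil.appendFile(fs, file, 512);
      // ... and append literal text via the String overload.
      DFSTestUtil.appendFile(fs, file, "extra trailing content");
      System.out.println("final length: " + fs.getFileStatus(file).getLen());
    } finally {
      cluster.shutdown();
    }
  }
}

As the examples below confirm, the int overload appends that many bytes of data to an existing file, while the String overload appends the given text; both operate through the supplied FileSystem.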

Example 1: testDiffReportWithRenameAndAppend

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Rename a file and then append some data to it
 */
@Test
public void testDiffReportWithRenameAndAppend() throws Exception {
  final Path root = new Path("/");
  final Path foo = new Path(root, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  final Path bar = new Path(root, "bar");
  hdfs.rename(foo, bar);
  DFSTestUtil.appendFile(hdfs, bar, 10); // append 10 bytes
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");

  // we always put modification on the file before rename
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
      new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
          DFSUtil.string2Bytes("bar")));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: TestSnapshotDiffReport.java

Example 2: changeData

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * make some changes under the given directory (created in the above way).
 * 1. rename dir/foo/d1 to dir/bar/d1
 * 2. delete dir/bar/d1/f3
 * 3. rename dir/foo to /dir/bar/d1/foo
 * 4. delete dir/bar/d1/foo/f1
 * 5. create file dir/bar/d1/foo/f1 whose size is 2*BLOCK_SIZE
 * 6. append one BLOCK to file dir/bar/f2
 * 7. rename dir/bar to dir/foo
 *
 * Thus after all these ops the subtree looks like this:
 *                       dir/
 *                       foo/
 *                 d1/    f2(A)    d2/
 *                foo/             f4
 *                f1(new)
 */
private void changeData(Path dir) throws Exception {
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  final Path d1 = new Path(foo, "d1");
  final Path f2 = new Path(bar, "f2");

  final Path bar_d1 = new Path(bar, "d1");
  dfs.rename(d1, bar_d1);
  final Path f3 = new Path(bar_d1, "f3");
  dfs.delete(f3, true);
  final Path newfoo = new Path(bar_d1, "foo");
  dfs.rename(foo, newfoo);
  final Path f1 = new Path(newfoo, "f1");
  dfs.delete(f1, true);
  DFSTestUtil.createFile(dfs, f1, 2 * BLOCK_SIZE, DATA_NUM, 0);
  DFSTestUtil.appendFile(dfs, f2, (int) BLOCK_SIZE);
  dfs.rename(bar, new Path(dir, "foo"));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 36, Source file: TestDistCpSync.java

Example 3: testUpdateQuotaForFSync

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Test if the quota can be correctly updated when file length is updated
 * through fsync
 */
@Test (timeout=60000)
public void testUpdateQuotaForFSync() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(dfs, bar, BLOCKSIZE, REPLICATION, 0L);
  dfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);

  FSDataOutputStream out = dfs.append(bar);
  out.write(new byte[BLOCKSIZE / 4]);
  ((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(BLOCKSIZE * 2 * REPLICATION, ds); // file is under construction

  out.write(new byte[BLOCKSIZE / 4]);
  out.close();

  fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns);
  assertEquals((BLOCKSIZE + BLOCKSIZE / 2) * REPLICATION, ds);

  // append another block
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals((BLOCKSIZE * 2 + BLOCKSIZE / 2) * REPLICATION, ds);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source file: TestDiskspaceQuotaUpdate.java

Example 4: testAppendOverStorageQuota

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Test append over storage quota does not mark file as UC or create lease
 */
@Test (timeout=60000)
public void testAppendOverStorageQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // lower quota to cause exception when appending to partial block
  dfs.setQuota(dir, Long.MAX_VALUE - 1, 1);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (DSQuotaExceededException e) {
    // ignore
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem().getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: TestDiskspaceQuotaUpdate.java

Example 5: testAppendOverTypeQuota

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Test append over a specific type of storage quota does not mark file as
 * UC or create a lease
 */
@Test (timeout=60000)
public void testAppendOverTypeQuota() throws Exception {
  final Path dir = new Path("/TestAppendOverTypeQuota");
  final Path file = new Path(dir, "file");

  // create partial block file
  dfs.mkdirs(dir);
  // set the storage policy on dir
  dfs.setStoragePolicy(dir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE/2, REPLICATION, seed);

  // set quota of SSD to 1L
  dfs.setQuotaByStorageType(dir, StorageType.SSD, 1L);
  final INodeDirectory dirNode = fsdir.getINode4Write(dir.toString())
      .asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
    Assert.fail("append didn't fail");
  } catch (RemoteException e) {
    assertTrue(e.getClassName().contains("QuotaByStorageTypeExceededException"));
  }

  // check that the file exists, isn't UC, and has no dangling lease
  INodeFile inode = fsdir.getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", cluster.getNamesystem()
      .getLeaseManager().getLeaseByPath(file.toString()));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure edits aren't corrupted
  dfs.recoverLease(file);
  cluster.restartNameNodes();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 43, Source file: TestDiskspaceQuotaUpdate.java

Example 6: testQuotaByStorageTypeWithFileCreateAppend

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify space consumed and remaining quota
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // append several blocks
  int appendLen = BLOCKSIZE * 2;
  DFSTestUtil.appendFile(dfs, createdFile1, appendLen);
  file1Len += appendLen;

  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source file: TestQuotaByStorageType.java

Example 7: prepare

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Override
public void prepare() throws Exception {
  // original size: 2.5 blocks
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
      REPLICATION, 0L);
  SnapshotTestHelper.createSnapshot(dfs, dir, "s1");

  // truncate to 1.5 block
  dfs.truncate(file, BLOCKSIZE + BLOCKSIZE / 2);
  TestFileTruncate.checkBlockRecovery(file, dfs);

  // append another 1 BLOCK
  DFSTestUtil.appendFile(dfs, file, BLOCKSIZE);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source file: TestTruncateQuotaUpdate.java

Example 8: testAddVolumesDuringWrite

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test(timeout=60000)
public void testAddVolumesDuringWrite()
    throws IOException, InterruptedException, TimeoutException,
    ReconfigurationException {
  startDFSCluster(1, 1);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  Path testFile = new Path("/test");
  createFile(testFile, 4);  // Each volume has 2 blocks.

  addVolumes(2);

  // Continue to write the same file, thus the new volumes will have blocks.
  DFSTestUtil.appendFile(cluster.getFileSystem(), testFile, BLOCK_SIZE * 8);
  verifyFileLength(cluster.getFileSystem(), testFile, 8 + 4);
  // After appending data, there should be [2, 2, 4, 4] blocks in each volume
  // respectively.
  List<Integer> expectedNumBlocks = Arrays.asList(2, 2, 4, 4);

  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals(1, blockReports.size());  // 1 DataNode
  assertEquals(4, blockReports.get(0).size());  // 4 volumes
  Map<DatanodeStorage, BlockListAsLongs> dnReport =
      blockReports.get(0);
  List<Integer> actualNumBlocks = new ArrayList<Integer>();
  for (BlockListAsLongs blockList : dnReport.values()) {
    actualNumBlocks.add(blockList.getNumberOfBlocks());
  }
  Collections.sort(actualNumBlocks);
  assertEquals(expectedNumBlocks, actualNumBlocks);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 32, Source file: TestDataNodeHotSwapVolumes.java

Example 9: testAddVolumesToFederationNN

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Test(timeout=60000)
public void testAddVolumesToFederationNN()
    throws IOException, TimeoutException, InterruptedException,
    ReconfigurationException {
  // Starts a cluster with 2 NameNodes and 1 DataNode. Each DataNode has 2
  // volumes.
  final int numNameNodes = 2;
  final int numDataNodes = 1;
  startDFSCluster(numNameNodes, numDataNodes);
  Path testFile = new Path("/test");
  // Create a file on the first namespace with 4 blocks.
  createFile(0, testFile, 4);
  // Create a file on the second namespace with 4 blocks.
  createFile(1, testFile, 4);

  // Add 2 volumes to the first DataNode.
  final int numNewVolumes = 2;
  addVolumes(numNewVolumes);

  // Append to the file on the first namespace.
  DFSTestUtil.appendFile(cluster.getFileSystem(0), testFile, BLOCK_SIZE * 8);

  List<List<Integer>> actualNumBlocks = getNumBlocksReport(0);
  assertEquals(cluster.getDataNodes().size(), actualNumBlocks.size());
  List<Integer> blocksOnFirstDN = actualNumBlocks.get(0);
  Collections.sort(blocksOnFirstDN);
  assertEquals(Arrays.asList(2, 2, 4, 4), blocksOnFirstDN);

  // Verify the second namespace also has the new volumes and they are empty.
  actualNumBlocks = getNumBlocksReport(1);
  assertEquals(4, actualNumBlocks.get(0).size());
  assertEquals(numNewVolumes,
      Collections.frequency(actualNumBlocks.get(0), 0));
}
 
Developer ID: naver, Project: hadoop, Lines of code: 35, Source file: TestDataNodeHotSwapVolumes.java

Example 10: testUpdateQuotaForAppend

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Test if the quota can be correctly updated for append
 */
@Test (timeout=60000)
public void testUpdateQuotaForAppend() throws Exception {
  final Path foo = new Path(dir ,"foo");
  final Path bar = new Path(foo, "bar");
  long currentFileLen = BLOCKSIZE;
  DFSTestUtil.createFile(dfs, bar, currentFileLen, REPLICATION, seed);
  dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);

  // append half of the block data, the previous file length is at block
  // boundary
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE / 2);
  currentFileLen += (BLOCKSIZE / 2);

  INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
  assertTrue(fooNode.isQuotaSet());
  QuotaCounts quota = fooNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  long ns = quota.getNameSpace();
  long ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  ContentSummary c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);

  // append another block, the previous file length is not at block boundary
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE);
  currentFileLen += BLOCKSIZE;

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);

  // append several blocks
  DFSTestUtil.appendFile(dfs, bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
  currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);

  quota = fooNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
  ns = quota.getNameSpace();
  ds = quota.getStorageSpace();
  assertEquals(2, ns); // foo and bar
  assertEquals(currentFileLen * REPLICATION, ds);
  c = dfs.getContentSummary(foo);
  assertEquals(c.getSpaceConsumed(), ds);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 52, Source file: TestDiskspaceQuotaUpdate.java

Example 11: testSaveLoadImageWithAppending

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Test the fsimage saving/loading while file appending.
 */
@Test (timeout=60000)
public void testSaveLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  // 1. create snapshot s0
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  // 2. create snapshot s1 before appending sub1file1 finishes
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // also append sub1file2
  DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // 3. create snapshot s2 before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // 4. save fsimage before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // dump fsdir
  File fsnBefore = dumpTree2File("before");
  // save the namesystem to a temp file
  File imageFile = saveFSImageToTempFile();
  
  // 5. load fsimage and compare
  // first restart the cluster, and format the cluster
  out.close();
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(true)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  // then load the fsimage
  loadFSImageFromTempFile(imageFile);
  
  // dump the fsdir tree again
  File fsnAfter = dumpTree2File("after");
  
  // compare two dumped tree
  SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 56, Source file: TestFSImageWithSnapshot.java

Example 12: modify

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
@Override
void modify() throws Exception {
  assertTrue(fs.exists(file));
  DFSTestUtil.appendFile(fs, file, appendLen);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source file: TestSnapshot.java

Example 13: testSnapshotFileLengthWithCatCommand

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/**
 * Added as part of jira HDFS-5343.
 * Checks that the cat command on a snapshot path cannot read a file
 * beyond the snapshot file length.
 * @throws Exception
 */
@Test (timeout = 600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {

  FSDataInputStream fis = null;
  FileStatus fileStatus = null;

  int bytesRead;
  byte[] buffer = new byte[BLOCKSIZE * 8];

  hdfs.mkdirs(sub);
  Path file1 = new Path(sub, file1Name);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);

  hdfs.allowSnapshot(sub);
  hdfs.createSnapshot(sub, snapshot1);

  DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);

  // Make sure we can read the entire file via its non-snapshot path.
  fileStatus = hdfs.getFileStatus(file1);
  assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
  fis = hdfs.open(file1);
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
  fis.close();

  Path file1snap1 =
      SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
  fis = hdfs.open(file1snap1);
  fileStatus = hdfs.getFileStatus(file1snap1);
  assertEquals(fileStatus.getLen(), BLOCKSIZE);
  // Make sure we can only read up to the snapshot length.
  bytesRead = fis.read(buffer, 0, buffer.length);
  assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
  fis.close();

  PrintStream outBackup = System.out;
  PrintStream errBackup = System.err;
  ByteArrayOutputStream bao = new ByteArrayOutputStream();
  System.setOut(new PrintStream(bao));
  System.setErr(new PrintStream(bao));
  // Make sure we can cat the file up to the snapshot length
  FsShell shell = new FsShell();
  try {
    ToolRunner.run(conf, shell, new String[] { "-cat",
    "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
    assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
  } finally {
    System.setOut(outBackup);
    System.setErr(errBackup);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 59, Source file: TestSnapshotFileLength.java

Example 14: testSnapshotPathINodesAfterModification

import org.apache.hadoop.hdfs.DFSTestUtil; // import the package/class the method depends on
/** 
 * Test resolving the path INodes for a snapshot file while modifying the
 * file after the snapshot is taken.
 */
@Test (timeout=15000)
public void testSnapshotPathINodesAfterModification() throws Exception {
  // First check the INode for /TestSnapshot/sub1/file1
  String[] names = INode.getPathNames(file1.toString());
  byte[][] components = INode.getPathComponents(names);
  INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components, false);
  // The number of inodes should be equal to components.length
  assertEquals(nodesInPath.length(), components.length);

  // The last INode should be associated with file1
  assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
      file1.toString());
  // record the modification time of the inode
  final long modTime = nodesInPath.getINode(nodesInPath.length() - 1)
      .getModificationTime();
  
  // Create a snapshot for the dir, and check the inodes for the path
  // pointing to a snapshot file
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, "s3");
  
  // Modify file1
  DFSTestUtil.appendFile(hdfs, file1, "the content for appending");

  // Check the INodes for snapshot of file1
  String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
  names = INode.getPathNames(snapshotPath);
  components = INode.getPathComponents(names);
  INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components, false);
  // Length of ssInodes should be (components.length - 1), since we will
  // ignore ".snapshot" 
  assertEquals(ssNodesInPath.length(), components.length - 1);
  final Snapshot s3 = getSnapshot(ssNodesInPath, "s3", 3);
  assertSnapshot(ssNodesInPath, true, s3, 3);
  // Check the INode for snapshot of file1
  INode snapshotFileNode = ssNodesInPath.getLastINode();
  assertEquals(snapshotFileNode.getLocalName(), file1.getName());
  assertTrue(snapshotFileNode.asFile().isWithSnapshot());
  // The modification time of the snapshot INode should be the same with the
  // original INode before modification
  assertEquals(modTime,
      snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));

  // Check the INode for /TestSnapshot/sub1/file1 again
  names = INode.getPathNames(file1.toString());
  components = INode.getPathComponents(names);
  INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir,
      components, false);
  assertSnapshot(newNodesInPath, false, s3, -1);
  // The number of inodes should be equal to components.length
  assertEquals(newNodesInPath.length(), components.length);
  // The last INode should be associated with file1
  final int last = components.length - 1;
  assertEquals(newNodesInPath.getINode(last).getFullPathName(),
      file1.toString());
  // The modification time of the INode for file1 should have been changed
  Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
  hdfs.deleteSnapshot(sub1, "s3");
  hdfs.disallowSnapshot(sub1);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 66, Source file: TestSnapshotPathINodes.java


Note: The org.apache.hadoop.hdfs.DFSTestUtil.appendFile examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please follow each project's license when redistributing or using the code, and do not repost without permission.