

Java HdfsDataOutputStream.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.client.HdfsDataOutputStream.close. If you are wondering how to use HdfsDataOutputStream.close in Java, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.client.HdfsDataOutputStream.


The sections below present 15 code examples of the HdfsDataOutputStream.close method, sorted by popularity by default.
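
Before the examples, here is a minimal, self-contained sketch of the pattern they all share: create a file on HDFS to obtain an HdfsDataOutputStream, write some bytes, and call close() to complete the last block and finalize the file. This is only a hedged illustration, not code taken from the projects below: it assumes a reachable HDFS cluster configured through fs.defaultFS, and the path /tmp/close-example.txt and class name HdfsCloseExample are made up for the example.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class HdfsCloseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: fs.defaultFS points at a running HDFS cluster, so get() returns a DistributedFileSystem.
    FileSystem fs = FileSystem.get(conf);
    Path file = new Path("/tmp/close-example.txt"); // illustrative path

    // On HDFS the stream returned by create() is an HdfsDataOutputStream at runtime,
    // which is why the tests below can cast the result of create()/append().
    HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(file, true);
    try {
      out.write("hello hdfs".getBytes(StandardCharsets.UTF_8));
      out.hflush(); // make the written data visible to readers before closing
    } finally {
      // close() flushes remaining data, completes the last block, and persists the
      // file metadata on the NameNode; the file is only finished after this call returns.
      out.close();
    }
    fs.close();
  }
}

The tests below follow the same create → write → close sequence and then assert on NameNode-side state (metadata log entries, snapshots, block files) that only becomes consistent once close() has returned.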

Example 1: testNoLogEntryBeforeClosing

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testNoLogEntryBeforeClosing() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    final Path dataset = new Path(project, "dataset");
    Path file = new Path(dataset, "file");
    dfs.mkdirs(dataset, FsPermission.getDefault());
    dfs.setMetaEnabled(dataset, true);
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    assertFalse(checkLog(TestUtil.getINodeId(cluster.getNameNode(), file),
        MetadataLogEntry.Operation.ADD));
    out.close();
    assertTrue(checkLog(TestUtil.getINodeId(cluster.getNameNode(), file),
        MetadataLogEntry.Operation.ADD));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 27, Source file: TestMetadataLog.java

Example 2: testBlockSending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testBlockSending() throws IOException {
  DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
  TestDfsClient testDfsClient = new TestDfsClient(getConfig());
  testDfsClient.injectIntoDfs(dfs);
  Util.createRandomFile(dfs, testFile, seed, TEST_BLOCK_COUNT,
      DFS_TEST_BLOCK_SIZE);

  FileStatus status = dfs.getFileStatus(testFile);
  LocatedBlock lb = dfs.getClient()
      .getLocatedBlocks(status.getPath().toUri().getPath(), 0, Long.MAX_VALUE)
      .get(0);
  DataNodeUtil.loseBlock(getCluster(), lb);
  List<LocatedBlock> lostBlocks = new ArrayList<LocatedBlock>();
  lostBlocks.add(lb);
  LocatedBlocks locatedBlocks =
      new LocatedBlocks(0, false, lostBlocks, null, true);
  testDfsClient.setMissingLocatedBlocks(locatedBlocks);
  LOG.info("Losing block " + lb.toString());

  HdfsDataOutputStream out = dfs.sendBlock(status.getPath(), lb, null, null);
  out.write(Util.randomBytes(seed,
      conf.getInt(DFS_BLOCK_SIZE_KEY, DFS_TEST_BLOCK_SIZE)), 0,
      DFS_TEST_BLOCK_SIZE);
  out.close();
  ExtendedBlock extendedBlock = new ExtendedBlock(lb.getBlock());
  extendedBlock.setBlockId(lb.getBlock().getBlockId());
  int number = getCluster().getAllBlockFiles(extendedBlock).length;
  Assert.assertEquals(conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
      number);
}
 
Developer: hopshadoop, Project: hops, Lines of code: 32, Source file: TestBlockSending.java

Example 3: testNonLoggingFolder

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testNonLoggingFolder() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    final Path dataset = new Path(project, "dataset");
    final Path subdir = new Path(dataset, "subdir");
    Path file = new Path(dataset, "file");
    dfs.mkdirs(dataset, FsPermission.getDefault());
    dfs.mkdirs(subdir);
    assertFalse(checkLog(TestUtil.getINodeId(cluster.getNameNode(), subdir),
        MetadataLogEntry.Operation.ADD));
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    out.close();
    assertFalse(checkLog(TestUtil.getINodeId(cluster.getNameNode(), file),
        MetadataLogEntry.Operation.ADD));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 28, Source file: TestMetadataLog.java

Example 4: testCreate

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testCreate() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    final Path dataset = new Path(project, "dataset");
    final Path subdir = new Path(dataset, "subdir");
    Path file = new Path(subdir, "file");
    dfs.mkdirs(dataset, FsPermission.getDefault());
    dfs.setMetaEnabled(dataset, true);
    dfs.mkdirs(subdir);
    assertTrue(checkLog(TestUtil.getINodeId(cluster.getNameNode(), subdir),
        MetadataLogEntry.Operation.ADD));
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    out.close();
    assertTrue(checkLog(TestUtil.getINodeId(cluster.getNameNode(), file),
        MetadataLogEntry.Operation.ADD));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 29, Source file: TestMetadataLog.java

Example 5: testDelete

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testDelete() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset = new Path(project, "dataset");
    Path folder = new Path(dataset, "folder");
    Path file = new Path(folder, "file");
    dfs.mkdirs(folder, FsPermission.getDefault());
    dfs.setMetaEnabled(dataset, true);
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    out.close();
    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file);
    int folderId = TestUtil.getINodeId(cluster.getNameNode(), folder);
    assertTrue(checkLog(folderId, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));
    dfs.delete(folder, true);
    assertTrue(checkLog(folderId, MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.DELETE));

    checkLogicalTimeDeleteAfterAdd(new int[]{folderId, inodeId});
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 33, Source file: TestMetadataLog.java

Example 6: testOldRename

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testOldRename() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset0 = new Path(project, "dataset0");
    Path dataset1 = new Path(project, "dataset1");
    Path file0 = new Path(dataset0, "file");
    Path file1 = new Path(dataset1, "file");
    dfs.mkdirs(dataset0, FsPermission.getDefault());
    dfs.mkdirs(dataset1, FsPermission.getDefault());
    dfs.setMetaEnabled(dataset0, true);
    dfs.setMetaEnabled(dataset1, true);
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file0, 1);
    out.close();
    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file0);
    int dataset0Id = TestUtil.getINodeId(cluster.getNameNode(), dataset0);
    int dataset1Id = TestUtil.getINodeId(cluster.getNameNode(), dataset1);
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));
    assertTrue(dfs.rename(file0, file1));
    assertTrue(checkLog(dataset0Id, inodeId,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(dataset1Id, inodeId, MetadataLogEntry.Operation.ADD));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 35, Source file: TestMetadataLog.java

Example 7: testRename

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testRename() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset0 = new Path(project, "dataset0");
    Path dataset1 = new Path(project, "dataset1");
    Path file0 = new Path(dataset0, "file");
    Path file1 = new Path(dataset1, "file");
    dfs.mkdirs(dataset0, FsPermission.getDefault());
    dfs.mkdirs(dataset1, FsPermission.getDefault());
    dfs.setMetaEnabled(dataset0, true);
    dfs.setMetaEnabled(dataset1, true);
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file0, 1);
    out.close();
    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file0);
    int dataset0Id = TestUtil.getINodeId(cluster.getNameNode(), dataset0);
    int dataset1Id = TestUtil.getINodeId(cluster.getNameNode(), dataset1);
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));
    dfs.rename(file0, file1, Options.Rename.NONE);
    assertTrue(checkLog(dataset0Id, inodeId,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(dataset1Id, inodeId, MetadataLogEntry.Operation.ADD));

    checkLogicalTimeAddDeleteAdd(inodeId);

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 37, Source file: TestMetadataLog.java

Example 8: testEnableLogForExistingDirectory

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testEnableLogForExistingDirectory() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_LEGACY_DELETE_ENABLE_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset = new Path(project, "dataset");
    Path folder = new Path(dataset, "folder");
    Path file = new Path(folder, "file");
    dfs.mkdirs(folder, FsPermission.getDefault());
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    out.close();
    dfs.setMetaEnabled(dataset, true);
    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file);
    int folderId = TestUtil.getINodeId(cluster.getNameNode(), folder);
    assertTrue(checkLog(folderId, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 29, Source file: TestMetadataLog.java

Example 9: testSaveLoadImageWithAppending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
/**
 * Test the fsimage saving/loading while file appending.
 */
@Test (timeout=60000)
public void testSaveLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  // 1. create snapshot s0
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  // 2. create snapshot s1 before appending sub1file1 finishes
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // also append sub1file2
  DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // 3. create snapshot s2 before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // 4. save fsimage before appending finishes
  out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // dump fsdir
  File fsnBefore = dumpTree2File("before");
  // save the namesystem to a temp file
  File imageFile = saveFSImageToTempFile();
  
  // 5. load fsimage and compare
  // first restart the cluster, and format the cluster
  out.close();
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(true)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  // then load the fsimage
  loadFSImageFromTempFile(imageFile);
  
  // dump the fsdir tree again
  File fsnAfter = dumpTree2File("after");
  
  // compare two dumped tree
  SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
 
Developer: naver, Project: hadoop, Lines of code: 56, Source file: TestFSImageWithSnapshot.java

Example 10: testAppend

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testAppend() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    final Path dataset = new Path(project, "dataset");
    final Path subdir = new Path(dataset, "subdir");
    Path file = new Path(subdir, "file");
    dfs.mkdirs(dataset, FsPermission.getDefault());
    dfs.setMetaEnabled(dataset, true);
    dfs.mkdirs(subdir);
    assertTrue(checkLog(TestUtil.getINodeId(cluster.getNameNode(), subdir),
        MetadataLogEntry.Operation.ADD));
    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    out.close();
    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file);
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));

    dfs.append(file).close();
    dfs.append(file).close();

    List<MetadataLogEntry> inodeLogEntries = new
        ArrayList<>(getMetadataLogEntries(inodeId));
    Collections.sort(inodeLogEntries, LOGICAL_TIME_COMPARATOR);

    assertTrue(inodeLogEntries.size() == 3);
    for(int i=0; i<3;i++){
      assertEquals(i+1, inodeLogEntries.get(i).getLogicalTime());
      assertTrue(inodeLogEntries.get(i).getOperation() ==
          MetadataLogEntry.Operation.ADD);
    }

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 44, Source file: TestMetadataLog.java

Example 11: testDeepOldRename

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testDeepOldRename() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset0 = new Path(project, "dataset0");
    Path folder0 = new Path(dataset0, "folder0");
    Path dataset1 = new Path(project, "dataset1");
    Path folder1 = new Path(dataset1, "folder1");
    Path file0 = new Path(folder0, "file");

    dfs.mkdirs(folder0, FsPermission.getDefault());
    dfs.mkdirs(dataset1, FsPermission.getDefault());

    dfs.setMetaEnabled(dataset0, true);
    dfs.setMetaEnabled(dataset1, true);

    HdfsDataOutputStream out = TestFileCreation.create(dfs, file0, 1);
    out.close();

    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file0);
    int folder0Id = TestUtil.getINodeId(cluster.getNameNode(), folder0);
    int dataset0Id = TestUtil.getINodeId(cluster.getNameNode(), dataset0);
    int dataset1Id = TestUtil.getINodeId(cluster.getNameNode(), dataset1);

    assertTrue(checkLog(dataset0Id, folder0Id, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));

    dfs.rename(folder0, folder1);

    int folder1Id = TestUtil.getINodeId(cluster.getNameNode(), folder1);
    assertTrue(checkLog(dataset0Id, folder0Id,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(dataset0Id, inodeId,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(dataset1Id, folder1Id, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(dataset1Id, inodeId, MetadataLogEntry.Operation.ADD));

    checkLogicalTimeAddDeleteAdd(folder0Id);
    checkLogicalTimeAddDeleteAdd(inodeId);

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 53, Source file: TestMetadataLog.java

Example 12: testDeepRename

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
@Test
public void testDeepRename() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset0 = new Path(project, "dataset0");
    Path folder0 = new Path(dataset0, "folder0");
    Path dataset1 = new Path(project, "dataset1");
    Path folder1 = new Path(dataset1, "folder1");
    Path file0 = new Path(folder0, "file");

    dfs.mkdirs(folder0, FsPermission.getDefault());
    dfs.mkdirs(dataset1, FsPermission.getDefault());

    dfs.setMetaEnabled(dataset0, true);
    dfs.setMetaEnabled(dataset1, true);

    HdfsDataOutputStream out = TestFileCreation.create(dfs, file0, 1);
    out.close();

    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file0);
    int folder0Id = TestUtil.getINodeId(cluster.getNameNode(), folder0);
    int dataset0Id = TestUtil.getINodeId(cluster.getNameNode(), dataset0);
    int dataset1Id = TestUtil.getINodeId(cluster.getNameNode(), dataset1);

    assertTrue(checkLog(dataset0Id, folder0Id, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));

    dfs.rename(folder0, folder1, Options.Rename.NONE);

    int folder1Id = TestUtil.getINodeId(cluster.getNameNode(), folder1);
    assertTrue(checkLog(dataset0Id, folder0Id,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(dataset0Id, inodeId,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(dataset1Id, folder1Id, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(dataset1Id, inodeId, MetadataLogEntry.Operation.ADD));

    checkLogicalTimeAddDeleteAdd(folder0Id);
    checkLogicalTimeAddDeleteAdd(inodeId);

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 53, Source file: TestMetadataLog.java

Example 13: testDeepRenameInTheSameDataset

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
private void testDeepRenameInTheSameDataset(boolean oldRename) throws
    IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset = new Path(project, "dataset");
    Path folder0 = new Path(dataset, "folder0");
    Path folder1 = new Path(folder0, "folder1");
    Path file = new Path(folder1, "file");

    Path newFolder = new Path(dataset, "newFolder");

    dfs.mkdirs(folder1, FsPermission.getDefault());

    dfs.setMetaEnabled(dataset, true);

    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    out.close();

    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file);
    int folder0Id = TestUtil.getINodeId(cluster.getNameNode(), folder0);
    int folder1Id = TestUtil.getINodeId(cluster.getNameNode(), folder1);
    int datasetId = TestUtil.getINodeId(cluster.getNameNode(), dataset);

    assertTrue(checkLog(datasetId, folder0Id, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(datasetId, folder1Id, MetadataLogEntry.Operation
        .ADD));
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));

    if(oldRename){
      dfs.rename(folder0, newFolder);
    }else{
      dfs.rename(folder0, newFolder, Options.Rename.NONE);
    }

    int newFolderId = TestUtil.getINodeId(cluster.getNameNode(), newFolder);
    assertTrue(checkLog(datasetId, folder0Id, folder0.getName(),
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(datasetId, newFolderId, newFolder.getName(),
        MetadataLogEntry.Operation.ADD));
    assertEquals("Subfolders and files shouldn't be logged during a rename " +
        "in the same dataset", 1, getMetadataLogEntries(folder1Id).size());
    assertEquals("Subfolders and files shouldn't be logged during a rename " +
        "in the same dataset", 1, getMetadataLogEntries(inodeId).size());

    checkLogicalTimeAddDeleteAdd(folder0Id);

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 59, Source file: TestMetadataLog.java

Example 14: testDeepRenameToNonMetaEnabledDir

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
private void testDeepRenameToNonMetaEnabledDir(boolean oldRename) throws
    IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .build();
  try {
    DistributedFileSystem dfs = cluster.getFileSystem();
    Path projects = new Path("/projects");
    Path project = new Path(projects, "project");
    Path dataset = new Path(project, "dataset");
    Path folder0 = new Path(dataset, "folder0");
    Path folder1 = new Path(folder0, "folder1");
    Path file = new Path(folder1, "file");

    Path newFolder = new Path(project, "newFolder");

    dfs.mkdirs(folder1, FsPermission.getDefault());

    dfs.setMetaEnabled(dataset, true);

    HdfsDataOutputStream out = TestFileCreation.create(dfs, file, 1);
    out.close();

    int inodeId = TestUtil.getINodeId(cluster.getNameNode(), file);
    int folder0Id = TestUtil.getINodeId(cluster.getNameNode(), folder0);
    int folder1Id = TestUtil.getINodeId(cluster.getNameNode(), folder1);
    int datasetId = TestUtil.getINodeId(cluster.getNameNode(), dataset);

    assertTrue(checkLog(datasetId, folder0Id, MetadataLogEntry.Operation.ADD));
    assertTrue(checkLog(datasetId, folder1Id, MetadataLogEntry.Operation
        .ADD));
    assertTrue(checkLog(inodeId, MetadataLogEntry.Operation.ADD));

    if(oldRename){
      dfs.rename(folder0, newFolder);
    }else{
      dfs.rename(folder0, newFolder, Options.Rename.NONE);
    }

    int newFolderId = TestUtil.getINodeId(cluster.getNameNode(), newFolder);
    assertTrue(checkLog(datasetId, folder0Id,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(datasetId, folder1Id,
        MetadataLogEntry.Operation.DELETE));
    assertTrue(checkLog(datasetId, inodeId,
        MetadataLogEntry.Operation.DELETE));
    assertFalse(checkLog(datasetId, newFolderId, newFolder.getName(),
        MetadataLogEntry.Operation.ADD));
    assertEquals("Subfolders and files shouldn't be logged for addition " +
        "during a move to a non MetaEnabled directoy", 2,
        getMetadataLogEntries(folder1Id).size());
    assertEquals("Subfolders and files shouldn't be logged for addition " +
            "during a move to a non MetaEnabled directoy", 2,
        getMetadataLogEntries(inodeId).size());

    //Check logical times
    checkLogicalTimeDeleteAfterAdd(new int[]{folder0Id, folder1Id, inodeId});

  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: hopshadoop, Project: hops, Lines of code: 66, Source file: TestMetadataLog.java

Example 15: testSnapshotWhileAppending

import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; // import the package/class this method depends on
/**
 * Test snapshot during file appending, before the corresponding
 * {@link FSDataOutputStream} instance closes.
 */
@Test (timeout=60000)
public void testSnapshotWhileAppending() throws Exception {
  Path file = new Path(dir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  
  // 1. append without closing stream --> create snapshot
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
  // deleted list, with size BLOCKSIZE*2
  INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
  INodeDirectorySnapshottable dirNode = (INodeDirectorySnapshottable) fsdir
      .getINode(dir.toString());
  DirectoryDiff last = dirNode.getDiffs().getLast();
  Snapshot s0 = last.snapshot;
  
  // 2. append without closing stream
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  // re-check nodeInDeleted_S0
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(s0));
  
  // 3. take snapshot --> close stream
  hdfs.createSnapshot(dir, "s1");
  out.close();
  
  // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
  // have been stored in s1's deleted list
  fileNode = (INodeFile) fsdir.getINode(file.toString());
  dirNode = (INodeDirectorySnapshottable) fsdir.getINode(dir.toString());
  last = dirNode.getDiffs().getLast();
  Snapshot s1 = last.snapshot;
  assertTrue(fileNode instanceof INodeFileWithSnapshot);
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
  
  // 4. modify file --> append without closing stream --> take snapshot -->
  // close stream
  hdfs.setReplication(file, (short) (REPLICATION - 1));
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s2");
  out.close();
  
  // re-check the size of nodeInDeleted_S1
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(s1));
}
 
Developer: chendave, Project: hadoop-TCP, Lines of code: 56, Source file: TestINodeFileUnderConstructionWithSnapshot.java
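
Examples 9 and 15 also exercise HdfsDataOutputStream.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)), which persists the written data and updates the visible file length on the NameNode while the stream is still open, deferring close() until after a snapshot or fsimage save. Below is a minimal sketch of that append-then-sync pattern; it is an illustration under stated assumptions (an existing file at the made-up path /tmp/append-sync-example.txt, append support enabled on the cluster), not code from the projects above.

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class AppendSyncExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/append-sync-example.txt"); // illustrative, must already exist

    // On HDFS, append() also returns an HdfsDataOutputStream at runtime.
    HdfsDataOutputStream out = (HdfsDataOutputStream) fs.append(file);
    out.write(new byte[]{1, 2, 3});

    // Flush to the DataNodes and update the file length on the NameNode so that
    // readers (or snapshots, as in the tests above) see the new size before close().
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));

    out.close(); // complete the last block and finalize the file
    fs.close();
  }
}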


Note: The org.apache.hadoop.hdfs.client.HdfsDataOutputStream.close method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please follow the License of the corresponding project. Do not reproduce without permission.