

Java DFSTestUtil.createFiles Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.DFSTestUtil.createFiles. If you are wondering what DFSTestUtil.createFiles does and how to use it in practice, the curated examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hdfs.DFSTestUtil.


Below are 6 code examples of the DFSTestUtil.createFiles method, ordered by popularity by default.
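
Before the examples, here is a minimal sketch of the usage pattern they all share: build a DFSTestUtil with its Builder, point createFiles at a directory on a running MiniDFSCluster, wait for replication, then clean up. The class name CreateFilesSketch and the path /sketchdat are illustrative placeholders, not taken from any Hadoop test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class CreateFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();

      // Configure the generator: test name, number of files,
      // directory depth, and maximum file size in bytes.
      DFSTestUtil util = new DFSTestUtil.Builder()
          .setName("CreateFilesSketch").setNumFiles(5)
          .setMaxLevels(2).setMaxSize(1024).build();

      // Populate /sketchdat with randomly generated files; the (short)
      // argument is the replication factor (a two-argument overload
      // without it also appears in the examples below).
      util.createFiles(fs, "/sketchdat", (short) 3);
      util.waitReplication(fs, "/sketchdat", (short) 3); // block until replicated
      util.checkFiles(fs, "/sketchdat");                 // verify file contents
      util.cleanup(fs, "/sketchdat");                    // delete the generated tree
    } finally {
      cluster.shutdown();
    }
  }
}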

Example 1: testFsckNonExistent
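
This test (from TestFsck) populates /srcdat with 20 files on a four-datanode MiniDFSCluster, then runs fsck against a path that does not exist and asserts that the output does not contain HEALTHY_STATUS.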

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
      setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) {} }
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source file: TestFsck.java

Example 2: testFinalizedReplicas
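
This test (from TestDatanodeRestart) writes two files under /test with replication 3 on a three-datanode cluster, restarts all datanodes, and verifies that the finalized replicas survive by re-checking the files afterwards.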

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
@Test
public void testFinalizedReplicas() throws Exception {
  // bring up a cluster of 3
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    // test finalized replicas
    final String TopDir = "/test";
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("TestDatanodeRestart").setNumFiles(2).build();
    util.createFiles(fs, TopDir, (short)3);
    util.waitReplication(fs, TopDir, (short)3);
    util.checkFiles(fs, TopDir);
    cluster.restartDataNodes();
    cluster.waitActive();
    util.checkFiles(fs, TopDir);
  } finally {
    cluster.shutdown();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source file: TestDatanodeRestart.java

Example 3: testFsckSymlink

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/** Test fsck with symlinks in the filesystem */
@Test
public void testFsckSymlink() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    final long precision = 1L;
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    final String fileName = "/srcdat";
    util.createFiles(fs, fileName);
    final FileContext fc = FileContext.getFileContext(
        cluster.getConfiguration(0));
    final Path file = new Path(fileName);
    final Path symlink = new Path("/srcdat-symlink");
    fc.createSymlink(file, symlink, false);
    util.waitReplication(fs, fileName, (short)3);
    long aTime = fc.getFileStatus(symlink).getAccessTime();
    Thread.sleep(precision);
    setupAuditLogs();
    String outStr = runFsck(conf, 0, true, "/");
    verifyAuditLogs();
    assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    assertTrue(outStr.contains("Total symlinks:\t\t1"));
    util.cleanup(fs, fileName);
  } finally {
    if (fs != null) { try { fs.close(); } catch (Exception e) {} }
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 40, Source file: TestFsck.java

Example 4: testlistCorruptFileBlocksDFS

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks = 
      dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // Loop over the data directories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
      // (blocks.length > 0));
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
        // break;
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 72, Source file: TestListCorruptFileBlocks.java

Example 5: testFsckPermission

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/** Test fsck with permission set on inodes */
@Test
public void testFsckPermission() throws Exception {
  final DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(20).build();
  final Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);

  MiniDFSCluster cluster = null;
  try {
    // Create a cluster with the current user, write some files
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    final MiniDFSCluster c2 = cluster;
    final String dir = "/dfsck";
    final Path dirpath = new Path(dir);
    final FileSystem fs = c2.getFileSystem();

    util.createFiles(fs, dir);
    util.waitReplication(fs, dir, (short) 3);
    fs.setPermission(dirpath, new FsPermission((short) 0700));

    // run DFSck as another user, should fail with permission issue
    UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
        "ProbablyNotARealUserName", new String[] { "ShangriLa" });
    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        System.out.println(runFsck(conf, -1, true, dir));
        return null;
      }
    });
    
    // set permission and try DFSck again as the fake user, should succeed
    fs.setPermission(dirpath, new FsPermission((short) 0777));
    fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final String outStr = runFsck(conf, 0, true, dir);
        System.out.println(outStr);
        assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
        return null;
      }
    });

    util.cleanup(fs, dir);
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source file: TestFsck.java

Example 6: testFsckListCorruptFilesBlocks

import org.apache.hadoop.hdfs.DFSTestUtil; // import the class this method depends on
/** Check that the -list-corruptfileblocks option of the fsck command works properly. */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 68, Source file: TestFsck.java


Note: The org.apache.hadoop.hdfs.DFSTestUtil.createFiles examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects and their copyright remains with the original authors; consult the corresponding project's license before distributing or using them, and do not republish without permission.