

Java MiniDFSCluster.getFileSystem Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getFileSystem. If you are unsure what MiniDFSCluster.getFileSystem does or how to use it, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The following presents 15 code examples of the MiniDFSCluster.getFileSystem method, sorted by popularity by default.
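Every example below follows the same skeleton: build a MiniDFSCluster, obtain a FileSystem handle via getFileSystem(), exercise it, and shut the cluster down in a finally block. The following minimal sketch distills that shared pattern; the class name and file path are illustrative only, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Start an in-process HDFS cluster with a single DataNode.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      // getFileSystem() returns a client bound to the mini cluster.
      FileSystem fs = cluster.getFileSystem();
      Path p = new Path("/hello.txt");
      fs.create(p).close();
      System.out.println("exists: " + fs.exists(p));
    } finally {
      cluster.shutdown();
    }
  }
}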

Example 1: testInvalidateOverReplicatedBlock

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test that an over-replicated block gets invalidated when the replication
 * factor is decreased while the block is still partial (under construction).
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestOverReplicatedBlocks.java

Example 2: testSufficientlySingleReplBlockUsesNewRack

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 1;
  final Path filePath = new Path("/testFile");

  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block with a replication factor of 1
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestBlocksWithNotEnoughRacks.java
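The getConf() helper used above is not part of the excerpt. A plausible stand-in is sketched below, assuming it only shortens the NameNode's block-report and replication-check intervals so waitForReplication() converges quickly; the exact keys are an assumption, not taken from TestBlocksWithNotEnoughRacks.

private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();
  // Shorten the intervals that drive replication work so the test
  // does not wait on the default multi-second timers.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1L);
  return conf;
}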

Example 3: write1byte

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * 1. create a file with DFS
 * 2. write 1 byte
 * 3. close the file
 * 4. open the same file
 * 5. read the 1 byte back and compare results
 */
static void write1byte(String methodName) throws IOException {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION + 1).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);
    out.write(1);
    out.close();
    
    final FSDataInputStream in = dfs.open(p);
    final int b = in.read();
    in.close();
    Assert.assertEquals(1, b);
  }
  finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestFiDataTransferProtocol.java
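createFile(dfs, p) is a helper of the surrounding test class that the excerpt omits. A minimal hypothetical equivalent is shown below; it assumes REPLICATION is a class constant, and the buffer and block sizes are illustrative defaults.

static FSDataOutputStream createFile(FileSystem fs, Path p) throws IOException {
  // Overwrite if present; replicate at the test's replication factor.
  return fs.create(p, true, 4096, (short) REPLICATION, 64L * 1024 * 1024);
}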

Example 4: testReduceReplFactorRespectsRackPolicy

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testReduceReplFactorRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decrease the replication factor and make sure the deleted replica
    // was not the one that lived on the rack with only one replica,
    // i.e. we should still have 2 racks after reducing the repl factor.
    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR); 

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestBlocksWithNotEnoughRacks.java

Example 5: testScheduleSameBlock

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testScheduleSameBlock() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(4).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleSameBlock/file";
    
    {
      final FSDataOutputStream out = dfs.create(new Path(file));
      out.writeChars("testScheduleSameBlock");
      out.close();
    }

    final Mover mover = newMover(conf);
    mover.init();
    final Mover.Processor processor = mover.new Processor();

    final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    final List<MLocation> locations = MLocation.toLocations(lb);
    final MLocation ml = locations.get(0);
    final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations);

    final List<StorageType> storageTypes = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
    Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
    Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestMover.java

Example 6: testToCheckTheFsckCommandOnIllegalArguments

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Test that running the fsck command with illegal arguments prints the
 * proper usage.
 */
@Test
public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // bring up a one-node cluster
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();

    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);

    // passing illegal option
    String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
    System.out.println(outStr);
    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // passing multiple paths as arguments
    outStr = runFsck(conf, -1, true, "/", fileName);
    System.out.println(outStr);
    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // clean up file system
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestFsck.java
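runFsck() is defined elsewhere in TestFsck. A sketch of how such a helper is typically written follows, with the signature inferred from the call sites above: it runs the DFSck tool, captures its output, and optionally checks the exit code.

static String runFsck(Configuration conf, int expectedErrCode,
    boolean checkErrorCode, String... path) throws Exception {
  ByteArrayOutputStream bStream = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bStream, true);
  // DFSck is the implementation behind the `hdfs fsck` command.
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  if (checkErrorCode) {
    assertEquals(expectedErrCode, errCode);
  }
  return bStream.toString();
}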

Example 7: testGlobbing

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/** Test globbing. */
public void testGlobbing() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat/*",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                 fs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestCopyFiles.java

Example 8: testDeprecatedGetBlockLocalPathInfoRpc

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user"
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestShortCircuitLocalRead.java

Example 9: testByAddingAnExtraDataNode

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor for the file. In this test, that
 * condition is reached by increasing the number of good replicas through
 * replication onto a new DataNode.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes
 *   Create a file with a replication factor of 3
 *   Corrupt one replica of a block of the file
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *     (the corrupt replica should not be removed, since the number of good
 *      replicas (2) is less than the replication factor (3))
 *   Start a new DataNode
 *   Verify that a new replica is created and the corrupt replica is
 *   removed.
 */
@Test
public void testByAddingAnExtraDataNode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    cluster.restartDataNode(dnPropsFourth);

    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    assertEquals(3, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestProcessCorruptBlocks.java
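corruptBlock() and countReplicas() are helpers of TestProcessCorruptBlocks that the excerpt omits. countReplicas() can plausibly be written in terms of the BlockManager API already seen in Example 1; treat this as a sketch, not the project's actual code.

private static NumberReplicas countReplicas(final FSNamesystem namesystem,
    ExtendedBlock block) {
  // countNodes() tallies live, corrupt, excess, and decommissioned replicas.
  return namesystem.getBlockManager().countNodes(block.getLocalBlock());
}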

Example 10: testDataNodeTimeSpend

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * This test ensures that writing causes the TotalWriteTime metric to
 * increase and that reading causes the TotalReadTime metric to increase.
 * @throws Exception
 */
@Test
public void testDataNodeTimeSpend() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(1, datanodes.size());
    DataNode datanode = datanodes.get(0);
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    final long LONG_FILE_LEN = 1024 * 1024 * 10;

    long startWriteValue = getLongCounter("TotalWriteTime", rb);
    long startReadValue = getLongCounter("TotalReadTime", rb);

    for (int x =0; x < 50; x++) {
      DFSTestUtil.createFile(fs, new Path("/time.txt."+ x),
              LONG_FILE_LEN, (short) 1, Time.monotonicNow());
    }

    for (int x =0; x < 50; x++) {
      DFSTestUtil.readFile(fs, new Path("/time.txt." + x));
    }

    MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
    long endWriteValue = getLongCounter("TotalWriteTime", rbNew);
    long endReadValue = getLongCounter("TotalReadTime", rbNew);

    assertTrue(endReadValue > startReadValue);
    assertTrue(endWriteValue > startWriteValue);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 43, Source: TestDataNodeMetrics.java

Example 11: testUnderReplicatedUsesNewRacks

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test
public void testUnderReplicatedUsesNewRacks() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // All datanodes are on the same rack
  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
    
    // Add new datanodes on a different rack and increase the
    // replication factor so the block is underreplicated and make
    // sure at least one of the hosts on the new rack is used. 
    String newRacks[] = {"/rack2", "/rack2"};
    cluster.startDataNodes(conf, 2, true, null, newRacks);
    REPLICATION_FACTOR = 5;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestBlocksWithNotEnoughRacks.java

Example 12: testDeleteLocal

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Verify that the -delete option works for other {@link FileSystem}
 * implementations. See MAPREDUCE-1285. */
public void testDeleteLocal() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      String destdir = TEST_ROOT_DIR + "/destdat";
      MyFile[] localFiles = createFiles(localfs, destdir);
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-delete",
                                       "-update",
                                       "-log",
                                       "/logs",
                                       namenode+"/srcdat",
                                       "file:///"+TEST_ROOT_DIR+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(localfs, destdir, files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path("/logs")));
      deldir(localfs, destdir);
      deldir(hdfs, "/logs");
      deldir(hdfs, "/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestCopyFiles.java

Example 13: testDataXceiverHandlesRequestShortCircuitShmFailure

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: TestShortCircuitCache.java
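createShortCircuitConf() is another helper the excerpt leaves out. A hedged sketch follows, assuming the usual short-circuit-read test setup: a per-test client context, a domain socket under sockDir, and TCP reads disabled (which is what makes the "TCP reads were disabled" assertion above meaningful).

private static Configuration createShortCircuitConf(String testName,
    TemporarySocketDirectory sockDir) {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, testName);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(), testName).getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  // Fail any read that falls back to TCP, so a broken short-circuit
  // path surfaces as the assertion seen in the test body.
  DFSInputStream.tcpReadsDisabledForTesting = true;
  DomainSocket.disableBindPathValidation();
  return conf;
}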

Example 14: createCheckPoint

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * Create a number of fsimage checkpoints
 * @param count number of checkpoints to create
 * @throws IOException
 */
public void createCheckPoint(int count) throws IOException {
  LOG.info("--starting mini cluster");
  // manage dirs parameter set to false 
  MiniDFSCluster cluster = null;
  SecondaryNameNode sn = null;
  
  try {
    cluster = new MiniDFSCluster.Builder(config)
                                .manageDataDfsDirs(false)
                                .manageNameDfsDirs(false).build();
    cluster.waitActive();

    LOG.info("--starting Secondary Node");

    // start secondary node
    sn = new SecondaryNameNode(config);
    assertNotNull(sn);

    // Create count new files and checkpoints
    for (int i=0; i<count; i++) {
      // create a file
      FileSystem fileSys = cluster.getFileSystem();
      Path p = new Path("t" + i);
      this.writeFile(fileSys, p, 1);
      LOG.info("--file " + p.toString() + " created");
      LOG.info("--doing checkpoint");
      sn.doCheckpoint();  // this shouldn't fail
      LOG.info("--done checkpoint");
    }
  } catch (IOException e) {
    // log before fail(), since fail() throws and any statement after it
    // would be unreachable
    System.err.println("checkpoint failed");
    fail(StringUtils.stringifyException(e));
  }  finally {
    if(sn!=null)
      sn.shutdown();
    if(cluster!=null) 
      cluster.shutdown();
    LOG.info("--cluster shutdown");
  }
}
 
Developer: naver, Project: hadoop, Lines: 47, Source: TestStartup.java
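this.writeFile(fileSys, p, 1) is a private helper of TestStartup. A hypothetical minimal version that writes a small buffer at the requested replication:

private void writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true, 4096, (short) repl,
      fileSys.getDefaultBlockSize(name));
  stm.write(new byte[1024]);  // the content is irrelevant to the checkpoint
  stm.close();
}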

Example 15: testGetFullPathNameAfterSetQuota

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the package/class this method depends on
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * the children INodes of the target INodeDirectory was not updated to point
 * to the new INodeDirectoryWithQuota. This test case covers that scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create a file for test
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);

    // Check the full path name of the INode associating with the file
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());
    
    // Call FSDirectory#unprotectedSetQuota which calls
    // INodeDirectory#replaceChild
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());
    
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    // Also rename dir
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    // /dir/file now should be renamed to /newdir/file
    fnode = fsdir.getINode(newFile.toString());
    // getFullPathName can return correct result only if the parent field of
    // child node is set correctly
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestINodeFile.java
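getDir(fsdir, dir) resolves the directory's INode and is not shown in the excerpt. A plausible implementation using the INodeDirectory.valueOf utility, with the signature inferred from the call site:

private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
    throws IOException {
  final String dirStr = dir.toString();
  // valueOf() throws if the INode does not exist or is not a directory.
  return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
}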


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.getFileSystem examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, who retain copyright of the source code; consult each project's license before redistributing or using it. Do not reproduce without permission.