

Java MiniDFSCluster.getNamesystem Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.getNamesystem. If you are wondering what MiniDFSCluster.getNamesystem does, how to call it, or simply want working examples, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The sections below present 15 code examples of the MiniDFSCluster.getNamesystem method, ordered by popularity.
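Before the examples, here is a minimal sketch of the common pattern they all follow: start a MiniDFSCluster, obtain the FSNamesystem through getNamesystem, run assertions against NameNode-internal state, and shut the cluster down. The builder and getNamesystem calls are real Hadoop test APIs; the body of the sketch is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

public class GetNamesystemSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
      // Start an in-process HDFS cluster with a single DataNode
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      // getNamesystem() hands back the NameNode's FSNamesystem,
      // giving tests direct access to NameNode-internal state
      FSNamesystem namesystem = cluster.getNamesystem();
      System.out.println("Block pool: " + namesystem.getBlockPoolId());
    } finally {
      if (cluster != null) { cluster.shutdown(); }
    }
  }
}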

Example 1: testPreTxidEditLogWithEdits

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Test case for loading a very simple edit log from a format
 * prior to the inclusion of edit transaction IDs in the log.
 */
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final FSNamesystem namesystem = cluster.getNamesystem();

    long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
    assertEquals(3, numEdits);
    // Sanity check the edit
    HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
    assertEquals("supergroup", fileInfo.getGroup());
    assertEquals(3, fileInfo.getReplication());
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestEditLog.java
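Example 1 calls a testLoad helper and references a HADOOP20_SOME_EDITS byte array, both defined elsewhere in TestEditLog and not shown above. A sketch consistent with the call site, assuming HADOOP20_SOME_EDITS holds a pre-captured edit log in the old pre-transaction-ID format and that the test class defines a byte[]-backed EditLogInputStream:

// Hypothetical reconstruction of the helper used in Example 1.
private long testLoad(byte[] data, FSNamesystem namesys) throws IOException {
  FSEditLogLoader loader = new FSEditLogLoader(namesys, 0);
  // EditLogByteInputStream is assumed to be an in-memory
  // EditLogInputStream defined inside the test class
  return loader.loadFSEdits(new EditLogByteInputStream(data), 1);
}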

Example 2: testInvalidateOverReplicatedBlock

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Test over replicated block should get invalidated when decreasing the
 * replication for a partial block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestOverReplicatedBlocks.java

Example 3: testWhenDecreasingReplication

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor for the file. In this test, the above
 * condition is exercised by reducing the replication factor.
 * The test strategy : 
 *   Bring up Cluster with 3 DataNodes
 *   Create a file of replication factor 3 
 *   Corrupt one replica of a block of the file 
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *    (corrupt replica should not be removed since number of good
 *     replicas (2) is less than replication factor (3))
 *   Set the replication factor to 2 
 *   Verify that the corrupt replica is removed
 *     (the corrupt replica should be removed since the number of good
 *      replicas (2) is now equal to the replication factor (2))
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // wait for 3 seconds so that all block reports are processed.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 54, Source: TestProcessCorruptBlocks.java
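Examples 3, 4, and 10 all call a countReplicas helper that the snippets do not include. A minimal sketch that matches its usage, assuming BlockManager#countNodes returns a NumberReplicas with liveReplicas() and corruptReplicas() accessors:

private NumberReplicas countReplicas(final FSNamesystem namesystem,
    ExtendedBlock block) {
  // Ask the BlockManager for its current replica accounting of the block
  return namesystem.getBlockManager().countNodes(block.getLocalBlock());
}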

Example 4: testByAddingAnExtraDataNode

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. In this test, the above 
 * condition is achieved by increasing the number of good replicas by 
 * replicating on a new Datanode. 
 * The test strategy : 
 *   Bring up Cluster with 3 DataNodes
 *   Create a file  of replication factor 3
 *   Corrupt one replica of a block of the file 
 *   Verify that there are still 2 good replicas and 1 corrupt replica 
 *     (corrupt replica should not be removed since number of good replicas
 *      (2) is less than the replication factor (3))
 *   Start a new DataNode
 *   Verify that a new replica is created and the corrupt replica is
 *   removed.
 * 
 */
@Test
public void testByAddingAnExtraDataNode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();
  DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    cluster.restartDataNode(dnPropsFourth);

    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    assertEquals(3, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestProcessCorruptBlocks.java

Example 5: testBatchedSyncWithClosedLogs

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Test what happens with the following sequence:
 *
 *  Thread A writes edit
 *  Thread B calls logSyncAll
 *           calls close() on stream
 *  Thread A calls logSync
 *
 * This sequence is legal and can occur if enterSafeMode() is closely
 * followed by saveNamespace.
 */
@Test
public void testBatchedSyncWithClosedLogs() throws Exception {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  ExecutorService threadA = Executors.newSingleThreadExecutor();
  ExecutorService threadB = Executors.newSingleThreadExecutor();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();

    // Log an edit from thread A
    doLogEdit(threadA, editLog, "thread-a 1");
    assertEquals("logging edit without syncing should do not affect txid",
      1, editLog.getSyncTxId());

    // logSyncAll in Thread B
    doCallLogSyncAll(threadB, editLog);
    assertEquals("logSyncAll should sync thread A's transaction",
      2, editLog.getSyncTxId());

    // Close edit log
    editLog.close();

    // Ask thread A to finish sync (which should be a no-op)
    doCallLogSync(threadA, editLog);
  } finally {
    threadA.shutdown();
    threadB.shutdown();
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestEditLog.java
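Examples 5 and 14 drive the edit log from dedicated executors through doLogEdit, doCallLogSync, and doCallLogSyncAll helpers that are not shown. Sketches consistent with their call sites, assuming each helper submits a Callable and blocks on the Future so the calling test observes any exception:

private void doLogEdit(ExecutorService exec, final FSEditLog log,
    final String filename) throws Exception {
  exec.submit(new Callable<Void>() {
    @Override
    public Void call() {
      log.logSetReplication(filename, (short) 1); // write one edit
      return null;
    }
  }).get();
}

private void doCallLogSync(ExecutorService exec, final FSEditLog log)
    throws Exception {
  exec.submit(new Callable<Void>() {
    @Override
    public Void call() {
      log.logSync(); // sync up to this thread's last transaction
      return null;
    }
  }).get();
}

private void doCallLogSyncAll(ExecutorService exec, final FSEditLog log)
    throws Exception {
  exec.submit(new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      log.logSyncAll(); // sync every transaction written so far
      return null;
    }
  }).get();
}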

Example 6: testHAMetrics

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test(timeout = 300000)
public void testHAMetrics() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, Integer.MAX_VALUE);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1)
      .build();
  FileSystem fs = null;
  try {
    cluster.waitActive();
    
    FSNamesystem nn0 = cluster.getNamesystem(0);
    FSNamesystem nn1 = cluster.getNamesystem(1);
    
    assertEquals("standby", nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby", nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());

    cluster.transitionToActive(0);
    final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    final ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeStatus");
    final Long ltt1 =
        (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
    assertTrue("lastHATransitionTime should be > 0", ltt1 > 0);
    
    assertEquals("active", nn0.getHAState());
    assertEquals(0, nn0.getMillisSinceLastLoadedEdits());
    assertEquals("standby", nn1.getHAState());
    assertTrue(0 < nn1.getMillisSinceLastLoadedEdits());
    
    cluster.transitionToStandby(0);
    final Long ltt2 =
        (Long) mbs.getAttribute(mxbeanName, "LastHATransitionTime");
    assertTrue("lastHATransitionTime should be > " + ltt1, ltt2 > ltt1);
    cluster.transitionToActive(1);
    
    assertEquals("standby", nn0.getHAState());
    assertTrue(0 < nn0.getMillisSinceLastLoadedEdits());
    assertEquals("active", nn1.getHAState());
    assertEquals(0, nn1.getMillisSinceLastLoadedEdits());
    
    Thread.sleep(2000); // make sure standby gets a little out-of-date
    assertTrue(2000 <= nn0.getMillisSinceLastLoadedEdits());
    
    assertEquals(0, nn0.getPendingDataNodeMessageCount());
    assertEquals(0, nn1.getPendingDataNodeMessageCount());
    
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    DFSTestUtil.createFile(fs, new Path("/foo"),
        10, (short)1, 1L);
    
    assertTrue(0 < nn0.getPendingDataNodeMessageCount());
    assertEquals(0, nn1.getPendingDataNodeMessageCount());
    long millisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
    
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(1),
        cluster.getNameNode(0));
    
    assertEquals(0, nn0.getPendingDataNodeMessageCount());
    assertEquals(0, nn1.getPendingDataNodeMessageCount());
    long newMillisSinceLastLoadedEdits = nn0.getMillisSinceLastLoadedEdits();
    // Since we just waited for the standby to catch up, the time since we
    // last loaded edits should be very low.
    assertTrue("expected " + millisSinceLastLoadedEdits + " > " +
        newMillisSinceLastLoadedEdits,
        millisSinceLastLoadedEdits > newMillisSinceLastLoadedEdits);
  } finally {
    IOUtils.cleanup(LOG, fs);
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 76, Source: TestHAMetrics.java

Example 7: testEditLog

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Tests transaction logging in dfs.
 */
@Test
public void testEditLog() throws IOException {

  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;

  try {
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
      File dir = new File(it.next().getPath());
      System.out.println(dir);
    }
    
    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();

    // set small size of flush buffer
    editLog.setOutputBufferCapacity(2048);
  
    // Create threads and make them run transactions concurrently.
    Thread threadId[] = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
      Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
      threadId[i] = new Thread(trans, "TransactionThread-" + i);
      threadId[i].start();
    }

    // wait for all transaction threads to finish
    for (int i = 0; i < NUM_THREADS; i++) {
      try {
        threadId[i].join();
      } catch (InterruptedException e) {
        i--;      // retry 
      }
    } 
    
    editLog.close();
      
    // Verify that we can read in all the transactions that we have written.
    // If there were any corruptions, it is likely that the reading in
    // of these transactions will throw an exception.
    //
    namesystem.getDelegationTokenSecretManager().stopThreads();
    int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
    int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys
        + 2; // + 2 for BEGIN and END txns

    for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
      System.out.println("Verifying file: " + editFile);
      
      FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);        
      long numEdits = loader.loadFSEdits(
          new EditLogFileInputStream(editFile), 1);
      assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
    }
  } finally {
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 74, Source: TestSecurityTokenEditLog.java

Example 8: testGetFullPathNameAfterSetQuota

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * all the children INodes of the target INodeDirectory is not changed to
 * point to the new INodeDirectoryWithQuota. This testcase tests this
 * scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create a file for test
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);

    // Check the full path name of the INode associating with the file
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());
    
    // Call FSDirectory#unprotectedSetQuota which calls
    // INodeDirectory#replaceChild
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());
    
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    // Also rename dir
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    // /dir/file now should be renamed to /newdir/file
    fnode = fsdir.getINode(newFile.toString());
    // getFullPathName can return correct result only if the parent field of
    // child node is set correctly
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestINodeFile.java
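Example 8 uses a getDir helper that is not part of the snippet. A plausible sketch, assuming INodeDirectory.valueOf resolves an INode to a directory (and throws if the path is missing or is not a directory):

private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
    throws IOException {
  final String dirStr = dir.toString();
  return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
}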

Example 9: testTriggerBlockIdCollision

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Test that collisions in the block ID space are handled gracefully.
 *
 * @throws IOException
 */
@Test
public void testTriggerBlockIdCollision() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    final int blockCount = 10;


    // Create a file with a few blocks to rev up the global block ID
    // counter.
    Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
    DFSTestUtil.createFile(
        fs, path1, IO_SIZE, BLOCK_SIZE * blockCount,
        BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);


    // Rewind the block ID counter in the name system object. This will result
    // in block ID collisions when we try to allocate new blocks.
    SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdManager()
      .getBlockIdGenerator();
    blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);

    // Trigger collisions by creating a new file.
    Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
    DFSTestUtil.createFile(
        fs, path2, IO_SIZE, BLOCK_SIZE * blockCount,
        BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
    assertThat(blocks2.size(), is(blockCount));

    // Make sure that file2 block IDs start immediately after file1
    assertThat(blocks2.get(0).getBlock().getBlockId(),
               is(blocks1.get(9).getBlock().getBlockId() + 1));

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestSequentialBlockId.java
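Example 9 references the class-level constants IO_SIZE, BLOCK_SIZE, REPLICATION, and SEED from TestSequentialBlockId. Hypothetical definitions consistent with the single-DataNode cluster above (the exact values in the original file may differ):

private static final int BLOCK_SIZE = 1024;    // small blocks keep the test fast
private static final int IO_SIZE = BLOCK_SIZE; // bytes written per IO operation
private static final short REPLICATION = 1;    // matches the one-DataNode cluster
private static final long SEED = 0;            // deterministic test file contents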

Example 10: testWithReplicationFactorAsOne

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * The corrupt block has to be removed when the number of valid replicas
 * matches replication factor for the file. The above condition should hold
 * true as long as there is one good replica. This test verifies that.
 * 
 * The test strategy : 
 *   Bring up Cluster with 2 DataNodes
 *   Create a file of replication factor 2 
 *   Corrupt one replica of a block of the file 
 *   Verify that there is one good replica and 1 corrupt replica
 *     (corrupt replica should not be removed since the number of good
 *      replicas (1) is less than the replication factor (2)).
 *   Set the replication factor to 1
 *   Verify that the corrupt replica is removed
 *     (corrupt replica should be removed since the number of good
 *      replicas (1) is equal to the replication factor (1))
 */
@Test(timeout=20000)
public void testWithReplicationFactorAsOne() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 1);

    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 1);

    // poll for up to 10 seconds, until the corrupt replica has been removed.
    for (int i = 0; i < 10; i++) {
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {
      }
      if (countReplicas(namesystem, block).corruptReplicas() == 0) {
        break;
      }
    }

    assertEquals(1, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 60, Source: TestProcessCorruptBlocks.java
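Examples 3, 4, and 10 also depend on a corruptBlock helper. The real helper in TestProcessCorruptBlocks additionally cleans up block-scanner state; a simplified, hypothetical sketch of the core idea, assuming MiniDFSCluster#corruptReplica returns a success flag, is:

private void corruptBlock(MiniDFSCluster cluster, FileSystem fs,
    final Path fileName, int dnIndex, ExtendedBlock block) throws IOException {
  // Damage the on-disk replica stored by DataNode dnIndex
  assertTrue(cluster.corruptReplica(dnIndex, block));
  // Restart the DataNode so the next block report lets the
  // NameNode notice the corrupt replica
  DataNodeProperties dnProps = cluster.stopDataNode(dnIndex);
  cluster.restartDataNode(dnProps);
}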

Example 11: testPendingAndInvalidate

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Test if BlockManager can correctly remove corresponding pending records
 * when a file is deleted
 * 
 * @throws Exception
 */
@Test
public void testPendingAndInvalidate() throws Exception {
  final Configuration CONF = new HdfsConfiguration();
  CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFS_REPLICATION_INTERVAL);
  CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
      DFS_REPLICATION_INTERVAL);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
      DATANODE_COUNT).build();
  cluster.waitActive();
  
  FSNamesystem namesystem = cluster.getNamesystem();
  BlockManager bm = namesystem.getBlockManager();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    // 1. create a file
    Path filePath = new Path("/tmp.txt");
    DFSTestUtil.createFile(fs, filePath, 1024, (short) 3, 0L);
    
    // 2. disable the heartbeats
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    }
    
    // 3. mark a couple of blocks as corrupt
    LocatedBlock block = NameNodeAdapter.getBlockLocations(
        cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
    cluster.getNamesystem().writeLock();
    try {
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
          "STORAGE_ID", "TEST");
      bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[1],
          "STORAGE_ID", "TEST");
    } finally {
      cluster.getNamesystem().writeUnlock();
    }
    BlockManagerTestUtil.computeAllPendingWork(bm);
    BlockManagerTestUtil.updateState(bm);
    assertEquals(bm.getPendingReplicationBlocksCount(), 1L);
    assertEquals(bm.pendingReplications.getNumReplicas(block.getBlock()
        .getLocalBlock()), 2);
    
    // 4. delete the file
    fs.delete(filePath, true);
    // retry at most 10 times, each time sleep for 1s. Note that 10s is much
    // less than the default pending record timeout (5~10min)
    int retries = 10; 
    long pendingNum = bm.getPendingReplicationBlocksCount();
    while (pendingNum != 0 && retries-- > 0) {
      Thread.sleep(1000);  // let NN do the deletion
      BlockManagerTestUtil.updateState(bm);
      pendingNum = bm.getPendingReplicationBlocksCount();
    }
    assertEquals(pendingNum, 0L);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 66, Source: TestPendingReplication.java
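Example 11 references DFS_REPLICATION_INTERVAL and DATANODE_COUNT, class-level constants in TestPendingReplication. Hypothetical definitions consistent with their usage:

private static final int DFS_REPLICATION_INTERVAL = 1; // seconds; speeds up the replication monitor
private static final int DATANODE_COUNT = 5;           // enough nodes to recover from a corrupt replica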

Example 12: testDisplayRecentEditLogOpCodes

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test
public void testDisplayRecentEditLogOpCodes() throws IOException {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
      .enableManagedDfsDirsRedundancy(false).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  FSImage fsimage = namesystem.getFSImage();
  for (int i = 0; i < 20; i++) {
    fileSys.mkdirs(new Path("/tmp/tmp" + i));
  }
  StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
  cluster.shutdown();

  File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
  assertTrue("Should exist: " + editFile, editFile.exists());

  // Corrupt the edits file.
  long fileLen = editFile.length();
  RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
  rwf.seek(fileLen - 40);
  for (int i = 0; i < 20; i++) {
    rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
  }
  rwf.close();
  
  StringBuilder bld = new StringBuilder();
  bld.append("^Error replaying edit log at offset \\d+.  ");
  bld.append("Expected transaction ID was \\d+\n");
  bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
        .enableManagedDfsDirsRedundancy(false).format(false).build();
    fail("should not be able to start");
  } catch (IOException e) {
    assertTrue("error message contains opcodes message",
        e.getMessage().matches(bld.toString()));
  }
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestFSEditLogLoader.java

Example 13: testSimpleEditLog

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
/**
 * Simple test for writing to and rolling the edit log.
 */
@Test
public void testSimpleEditLog() throws IOException {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();
    
    assertExistsInStorageDirs(
        cluster, NameNodeDirType.EDITS, 
        NNStorage.getInProgressEditsFileName(1));
    

    editLog.logSetReplication("fakefile", (short) 1);
    editLog.logSync();
    
    editLog.rollEditLog();

    assertExistsInStorageDirs(
        cluster, NameNodeDirType.EDITS,
        NNStorage.getFinalizedEditsFileName(1,3));
    assertExistsInStorageDirs(
        cluster, NameNodeDirType.EDITS,
        NNStorage.getInProgressEditsFileName(4));

    
    editLog.logSetReplication("fakefile", (short) 2);
    editLog.logSync();
    
    editLog.close();
  } finally {
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestEditLog.java

Example 14: testSyncBatching

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test
public void testSyncBatching() throws Exception {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  ExecutorService threadA = Executors.newSingleThreadExecutor();
  ExecutorService threadB = Executors.newSingleThreadExecutor();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    FSImage fsimage = namesystem.getFSImage();
    final FSEditLog editLog = fsimage.getEditLog();

    assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced",
      1, editLog.getSyncTxId());
    
    // Log an edit from thread A
    doLogEdit(threadA, editLog, "thread-a 1");
    assertEquals("logging edit without syncing should do not affect txid",
      1, editLog.getSyncTxId());

    // Log an edit from thread B
    doLogEdit(threadB, editLog, "thread-b 1");
    assertEquals("logging edit without syncing should do not affect txid",
      1, editLog.getSyncTxId());

    // Now ask to sync edit from B, which should sync both edits.
    doCallLogSync(threadB, editLog);
    assertEquals("logSync from second thread should bump txid up to 3",
      3, editLog.getSyncTxId());

    // Now ask to sync edit from A, which was already batched in - thus
    // it should increment the batch count metric
    doCallLogSync(threadA, editLog);
    assertEquals("logSync from first thread shouldn't change txid",
      3, editLog.getSyncTxId());

    //Should have incremented the batch count exactly once
    assertCounter("TransactionsBatchedInSync", 1L, 
      getMetrics("NameNodeActivity"));
  } finally {
    threadA.shutdown();
    threadB.shutdown();
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 52, Source: TestEditLog.java

Example 15: testEditChecksum

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class
@Test
public void testEditChecksum() throws Exception {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  FSImage fsimage = namesystem.getFSImage();
  final FSEditLog editLog = fsimage.getEditLog();
  fileSys.mkdirs(new Path("/tmp"));

  Iterator<StorageDirectory> iter = fsimage.getStorage().
    dirIterator(NameNodeDirType.EDITS);
  LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
  while (iter.hasNext()) {
    sds.add(iter.next());
  }
  editLog.close();
  cluster.shutdown();

  for (StorageDirectory sd : sds) {
    File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
    assertTrue(editFile.exists());

    long fileLen = editFile.length();
    LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    rwf.seek(fileLen-4); // seek to checksum bytes
    int b = rwf.readInt();
    rwf.seek(fileLen-4);
    rwf.writeInt(b+1);
    rwf.close();
  }
  
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
    fail("should not be able to start");
  } catch (IOException e) {
    // expected
    assertNotNull("Exception should have a non-null cause", e.getCause());
    assertEquals("Cause of exception should be ChecksumException",
        ChecksumException.class, e.getCause().getClass());
  }
}
 
Developer: naver, Project: hadoop, Lines: 49, Source: TestEditLog.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.getNamesystem method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. Consult each project's License before using or redistributing the code; do not republish this compilation without permission.