

Java MiniDFSCluster.waitActive Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.MiniDFSCluster.waitActive. If you are unsure what MiniDFSCluster.waitActive does, how to call it, or where to find working examples, the curated code samples below should help. You can also browse further usage examples of the containing class, org.apache.hadoop.hdfs.MiniDFSCluster.


The following presents 15 code examples of the MiniDFSCluster.waitActive method, sorted by popularity by default.
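Before the individual examples, here is a minimal, self-contained sketch of the pattern that every example below shares: build an in-process HDFS cluster with MiniDFSCluster.Builder, call waitActive() to block until the NameNode and all DataNodes are up, and shut the cluster down in a finally block. The class name WaitActiveExample, the single-DataNode setup, and the mkdirs call are illustrative assumptions for this sketch, not code taken from any particular example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitActiveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
      // Start an in-process HDFS cluster with a single DataNode.
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      // Block until the NameNode is out of safe mode and all DataNodes
      // have registered; only then is the cluster ready for I/O.
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/example"));  // hypothetical workload
    } finally {
      // Always shut the cluster down, even if the body above throws.
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}

Because both build() and waitActive() can fail partway through startup, the examples below consistently keep the cluster variable nullable and guard shutdown() with a null check in the finally block.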

Example 1: testNotConfigured

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/** 
 * Test that an exception is thrown if a journal class doesn't exist
 * in the configuration 
 */
@Test(expected=IllegalArgumentException.class)
public void testNotConfigured() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           "dummy://test");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestGenericJournalConf.java

Example 2: testNoFadviseAfterWriteThenRead

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
@Test(timeout=120000)
public void testNoFadviseAfterWriteThenRead() throws Exception {
  // start a cluster
  LOG.info("testNoFadviseAfterWriteThenRead");
  tracker.clear();
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  String TEST_PATH = "/test";
  int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
        .build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // create new file
    createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
    // verify that we did not drop everything from the cache during file creation.
    ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
        TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
    String fadvisedFileName = cluster.getBlockFile(0, block).getName();
    Stats stats = tracker.getStats(fadvisedFileName);
    Assert.assertNull(stats);
    
    // read file
    readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, false);
    // verify that the read did not drop anything from the cache either:
    // with no fadvise requested, the tracker should still have no stats.
    Assert.assertNull(tracker.getStats(fadvisedFileName));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 35, Source: TestCachingStrategy.java

Example 3: testFadviseAfterWriteThenRead

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
@Test(timeout=120000)
public void testFadviseAfterWriteThenRead() throws Exception {
  // start a cluster
  LOG.info("testFadviseAfterWriteThenRead");
  tracker.clear();
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  String TEST_PATH = "/test";
  int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
        .build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // create new file
    createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, true);
    // verify that we dropped everything from the cache during file creation.
    ExtendedBlock block = cluster.getNameNode().getRpcServer().getBlockLocations(
        TEST_PATH, 0, Long.MAX_VALUE).get(0).getBlock();
    String fadvisedFileName = cluster.getBlockFile(0, block).getName();
    Stats stats = tracker.getStats(fadvisedFileName);
    stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
    stats.clear();
    
    // read file
    readHdfsFile(fs, new Path(TEST_PATH), Long.MAX_VALUE, true);
    // verify that we dropped everything from the cache.
    Assert.assertNotNull(stats);
    stats.assertDroppedInRange(0, TEST_PATH_LEN - WRITE_PACKET_SIZE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestCachingStrategy.java

Example 4: testWebHdfsRenameSnapshot

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Test snapshot rename through WebHdfs
 */
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);

    final Path foo = new Path("/foo");
    dfs.mkdirs(foo);
    dfs.allowSnapshot(foo);

    webHdfs.createSnapshot(foo, "s1");
    final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
    Assert.assertTrue(webHdfs.exists(s1path));

    // rename s1 to s2
    webHdfs.renameSnapshot(foo, "s1", "s2");
    Assert.assertFalse(webHdfs.exists(s1path));
    final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
    Assert.assertTrue(webHdfs.exists(s2path));

    webHdfs.deleteSnapshot(foo, "s2");
    Assert.assertFalse(webHdfs.exists(s2path));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: TestWebHDFS.java

Example 5: test

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
@Test
public void test() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();

    FSNamesystem fsn = cluster.getNameNode().namesystem;

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");

    String snapshotStats = (String) (mbs.getAttribute(mxbeanName,
        "SnapshotStats"));

    @SuppressWarnings("unchecked")
    Map<String, Object> stat = (Map<String, Object>) JSON
        .parse(snapshotStats);

    assertTrue(stat.containsKey("SnapshottableDirectories")
        && (Long) stat.get("SnapshottableDirectories") == fsn
            .getNumSnapshottableDirs());
    assertTrue(stat.containsKey("Snapshots")
        && (Long) stat.get("Snapshots") == fsn.getNumSnapshots());

    Object pendingDeletionBlocks = mbs.getAttribute(mxbeanName,
      "PendingDeletionBlocks");
    assertNotNull(pendingDeletionBlocks);
    assertTrue(pendingDeletionBlocks instanceof Long);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestFSNamesystemMBean.java

Example 6: createCheckPoint

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Create a number of fsimage checkpoints
 * @param count number of checkpoints to create
 * @throws IOException
 */
public void createCheckPoint(int count) throws IOException {
  LOG.info("--starting mini cluster");
  // manage dirs parameter set to false 
  MiniDFSCluster cluster = null;
  SecondaryNameNode sn = null;
  
  try {
    cluster = new MiniDFSCluster.Builder(config)
                                .manageDataDfsDirs(false)
                                .manageNameDfsDirs(false).build();
    cluster.waitActive();

    LOG.info("--starting Secondary Node");

    // start secondary node
    sn = new SecondaryNameNode(config);
    assertNotNull(sn);

    // Create count new files and checkpoints
    for (int i=0; i<count; i++) {
      // create a file
      FileSystem fileSys = cluster.getFileSystem();
      Path p = new Path("t" + i);
      this.writeFile(fileSys, p, 1);
      LOG.info("--file " + p.toString() + " created");
      LOG.info("--doing checkpoint");
      sn.doCheckpoint();  // this shouldn't fail
      LOG.info("--done checkpoint");
    }
  } catch (IOException e) {
    System.err.println("checkpoint failed");
    fail(StringUtils.stringifyException(e)); // fail() throws, so no explicit rethrow is needed
  } finally {
    if (sn != null)
      sn.shutdown();
    if (cluster != null)
      cluster.shutdown();
    LOG.info("--cluster shutdown");
  }
}
 
Developer: naver, Project: hadoop, Lines: 47, Source: TestStartup.java

Example 7: testlistCorruptFileBlocksDFS

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Test listCorruptFileBlocks in DistributedFileSystem.
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks = 
      dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // For loop through number of datadirectories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
      // (blocks.length > 0));
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
        // break;
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 72, Source: TestListCorruptFileBlocks.java

Example 8: testEditLog

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Tests transaction logging in dfs.
 */
@Test
public void testEditLog() throws IOException {

  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;

  try {
    conf.setBoolean(
        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
      File dir = new File(it.next().getPath());
      System.out.println(dir);
    }
    
    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();

    // set small size of flush buffer
    editLog.setOutputBufferCapacity(2048);
  
    // Create threads and make them run transactions concurrently.
    Thread[] threadId = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
      Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
      threadId[i] = new Thread(trans, "TransactionThread-" + i);
      threadId[i].start();
    }

    // wait for all transactions to get over
    for (int i = 0; i < NUM_THREADS; i++) {
      try {
        threadId[i].join();
      } catch (InterruptedException e) {
        i--;      // retry 
      }
    } 
    
    editLog.close();
      
    // Verify that we can read in all the transactions that we have written.
    // If there were any corruptions, it is likely that the reading in
    // of these transactions will throw an exception.
    //
    namesystem.getDelegationTokenSecretManager().stopThreads();
    int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
    int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys
        + 2; // + 2 for BEGIN and END txns

    for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
      File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
      System.out.println("Verifying file: " + editFile);
      
      FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);        
      long numEdits = loader.loadFSEdits(
          new EditLogFileInputStream(editFile), 1);
      assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
    }
  } finally {
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 74, Source: TestSecurityTokenEditLog.java

Example 9: testSaveNamespace

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Tests saving fs image while transactions are ongoing.
 */
@Test
public void testSaveNamespace() throws Exception {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;

  AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();
    final NamenodeProtocols nn = cluster.getNameNodeRpc();

    FSImage fsimage = namesystem.getFSImage();
    FSEditLog editLog = fsimage.getEditLog();

    startTransactionWorkers(nn, caughtErr);

    for (int i = 0; i < NUM_SAVE_IMAGE && caughtErr.get() == null; i++) {
      try {
        Thread.sleep(20);
      } catch (InterruptedException ignored) {}


      LOG.info("Save " + i + ": entering safe mode");
      namesystem.enterSafeMode(false);

      // Verify edit logs before the save
      // They should start with the first edit after the checkpoint
      long logStartTxId = fsimage.getStorage().getMostRecentCheckpointTxId() + 1; 
      verifyEditLogs(namesystem, fsimage,
          NNStorage.getInProgressEditsFileName(logStartTxId),
          logStartTxId);


      LOG.info("Save " + i + ": saving namespace");
      namesystem.saveNamespace();
      LOG.info("Save " + i + ": leaving safemode");

      long savedImageTxId = fsimage.getStorage().getMostRecentCheckpointTxId();
      
      // Verify that edit logs post save got finalized and aren't corrupt
      verifyEditLogs(namesystem, fsimage,
          NNStorage.getFinalizedEditsFileName(logStartTxId, savedImageTxId),
          logStartTxId);
      
      // The checkpoint id should be 1 less than the last written ID, since
      // the log roll writes the "BEGIN" transaction to the new log.
      assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),
                   editLog.getLastWrittenTxId() - 1);

      namesystem.leaveSafeMode();
      LOG.info("Save " + i + ": complete");
    }
  } finally {
    stopTransactionWorkers();
    if (caughtErr.get() != null) {
      throw new RuntimeException(caughtErr.get());
    }
    if(fileSys != null) fileSys.close();
    if(cluster != null) cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 69, Source: TestEditLogRace.java

Example 10: doTestWriteOverFailoverWithDnFail

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
private void doTestWriteOverFailoverWithDnFail(TestScenario scenario)
    throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  
  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(5)
    .build();
  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);
    
    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    
    // Make sure all the blocks are written before failover
    stm.hflush();

    LOG.info("Failing over to NN 1");
    scenario.run(cluster);

    assertTrue(fs.exists(TEST_PATH));
    
    cluster.stopDataNode(0);

    // write another block and a half
    AppendTestUtil.write(stm, BLOCK_AND_A_HALF, BLOCK_AND_A_HALF);
    stm.hflush();
    
    LOG.info("Failing back to NN 0");
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
    
    cluster.stopDataNode(1);
    
    AppendTestUtil.write(stm, BLOCK_AND_A_HALF*2, BLOCK_AND_A_HALF);
    stm.hflush();

    stm.close();
    stm = null;
    
    AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF * 3);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 56, Source: TestPipelinesFailover.java

Example 11: testCheckThatNameNodeResourceMonitorIsRunning

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Tests that NameNode resource monitor causes the NN to enter safe mode when
 * resources are low.
 */
@Test
public void testCheckThatNameNodeResourceMonitorIsRunning()
    throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 1);
    
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();

    NameNodeResourceChecker mockResourceChecker = Mockito.mock(NameNodeResourceChecker.class);
    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(true);
    cluster.getNameNode().getNamesystem().nnResourceChecker = mockResourceChecker;

    cluster.waitActive();

    String name = NameNodeResourceMonitor.class.getName();

    boolean isNameNodeMonitorRunning = false;
    Set<Thread> runningThreads = Thread.getAllStackTraces().keySet();
    for (Thread runningThread : runningThreads) {
      if (runningThread.toString().startsWith("Thread[" + name)) {
        isNameNodeMonitorRunning = true;
        break;
      }
    }
    assertTrue("NN resource monitor should be running",
        isNameNodeMonitorRunning);
    assertFalse("NN should not presently be in safe mode",
        cluster.getNameNode().isInSafeMode());
    
    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(false);

    // Make sure the NNRM thread has a chance to run.
    long startMillis = Time.now();
    while (!cluster.getNameNode().isInSafeMode() &&
        Time.now() < startMillis + (60 * 1000)) {
      Thread.sleep(1000);
    }

    assertTrue("NN should be in safe mode after resources crossed threshold",
        cluster.getNameNode().isInSafeMode());
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 53, Source: TestNameNodeResourceChecker.java

Example 12: test2NNRegistration

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Start multiple NNs and a single DN, and verify per-BP registrations and
 * handshakes.
 *
 * @throws IOException
 */
@Test
public void test2NNRegistration() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .build();
  try {
    cluster.waitActive();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    assertNotNull("cannot create nn1", nn1);
    assertNotNull("cannot create nn2", nn2);

    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
    String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
    String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
    int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
    int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
    int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
    assertNotSame("namespace ids should be different", ns1, ns2);
    LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
        + nn1.getNameNodeAddress());
    LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri="
        + nn2.getNameNodeAddress());

    // check number of volumes in fsdataset
    DataNode dn = cluster.getDataNodes().get(0);
    final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
    Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
    int i = 0;
    for (Map.Entry<String, Object> e : volInfos.entrySet()) {
      LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
    }
    // number of volumes should be 2 - [data1, data2]
    assertEquals("number of volumes is wrong", 2, volInfos.size());

    for (BPOfferService bpos : dn.getAllBpOs()) {
      LOG.info("BP: " + bpos);
    }

    BPOfferService bpos1 = dn.getAllBpOs()[0];
    BPOfferService bpos2 = dn.getAllBpOs()[1];

    // The order of bpos is not guaranteed, so fix the order
    if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
      BPOfferService tmp = bpos1;
      bpos1 = bpos2;
      bpos2 = tmp;
    }

    assertEquals("wrong nn address", getNNSocketAddress(bpos1),
        nn1.getNameNodeAddress());
    assertEquals("wrong nn address", getNNSocketAddress(bpos2),
        nn2.getNameNodeAddress());
    assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
    assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
    assertEquals("wrong cid", dn.getClusterId(), cid1);
    assertEquals("cid should be same", cid2, cid1);
    assertEquals("namespace should be same",
        bpos1.bpNSInfo.namespaceID, ns1);
    assertEquals("namespace should be same",
        bpos2.bpNSInfo.namespaceID, ns2);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 74, Source: TestDataNodeMultipleRegistrations.java

Example 13: testDataXceiverHandlesRequestShortCircuitShmFailure

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
@Test(timeout=60000)
public void testDataXceiverHandlesRequestShortCircuitShmFailure()
    throws Exception {
  BlockReaderTestUtil.enableShortCircuitShmTracing();
  TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
  Configuration conf = createShortCircuitConf(
      "testDataXceiverHandlesRequestShortCircuitShmFailure", sockDir);
  conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
      1000000000L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  final Path TEST_PATH1 = new Path("/test_file1");
  DFSTestUtil.createFile(fs, TEST_PATH1, 4096,
      (short)1, 0xFADE1);
  LOG.info("Setting failure injector and performing a read which " +
      "should fail...");
  DataNodeFaultInjector failureInjector = Mockito.mock(DataNodeFaultInjector.class);
  Mockito.doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      throw new IOException("injected error into sendShmResponse");
    }
  }).when(failureInjector).sendShortCircuitShmResponse();
  DataNodeFaultInjector prevInjector = DataNodeFaultInjector.instance;
  DataNodeFaultInjector.instance = failureInjector;

  try {
    // The first read will try to allocate a shared memory segment and slot.
    // The shared memory segment allocation will fail because of the failure
    // injector.
    DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
    Assert.fail("expected readFileBuffer to fail, but it succeeded.");
  } catch (Throwable t) {
    GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
        "testing, but we failed to do a non-TCP read.", t);
  }

  checkNumberOfSegmentsAndSlots(0, 0,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  LOG.info("Clearing failure injector and performing another read...");
  DataNodeFaultInjector.instance = prevInjector;

  fs.getClient().getClientContext().getDomainSocketFactory().clearPathMap();

  // The second read should succeed.
  DFSTestUtil.readFileBuffer(fs, TEST_PATH1);

  // We should have added a new short-circuit shared memory segment and slot.
  checkNumberOfSegmentsAndSlots(1, 1,
      cluster.getDataNodes().get(0).getShortCircuitRegistry());

  cluster.shutdown();
  sockDir.close();
}
 
Developer: naver, Project: hadoop, Lines: 58, Source: TestShortCircuitCache.java

Example 14: testOOOWrites

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

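    // Issue the writes highest-offset-first so the server receives them
    // out of order and must buffer and reassemble them correctly.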
    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 76, Source: TestWrites.java

Example 15: testResetThreadLocalCachedOps

import org.apache.hadoop.hdfs.MiniDFSCluster; // import the required package/class for this method
/**
 * Edit log op instances are cached internally using thread-local storage.
 * This test checks that the cached instances are reset in between different
 * transactions processed on the same thread, so that we don't accidentally
 * apply incorrect attributes to an inode.
 *
 * @throws IOException if there is an I/O error
 */
@Test
public void testResetThreadLocalCachedOps() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  // Set single handler thread, so all transactions hit same thread-local ops.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fileSys = cluster.getFileSystem();

    // Create /dir1 with a default ACL.
    Path dir1 = new Path("/dir1");
    fileSys.mkdirs(dir1);
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
    fileSys.modifyAclEntries(dir1, aclSpec);

    // /dir1/dir2 is expected to clone the default ACL.
    Path dir2 = new Path("/dir1/dir2");
    fileSys.mkdirs(dir2);

    // /dir1/file1 is expected to clone the default ACL.
    Path file1 = new Path("/dir1/file1");
    fileSys.create(file1).close();

    // /dir3 is not a child of /dir1, so must not clone the default ACL.
    Path dir3 = new Path("/dir3");
    fileSys.mkdirs(dir3);

    // /file2 is not a child of /dir1, so must not clone the default ACL.
    Path file2 = new Path("/file2");
    fileSys.create(file2).close();

    // Restart and assert the above stated expectations.
    IOUtils.cleanup(LOG, fileSys);
    cluster.restartNameNode();
    fileSys = cluster.getFileSystem();
    assertFalse(fileSys.getAclStatus(dir1).getEntries().isEmpty());
    assertFalse(fileSys.getAclStatus(dir2).getEntries().isEmpty());
    assertFalse(fileSys.getAclStatus(file1).getEntries().isEmpty());
    assertTrue(fileSys.getAclStatus(dir3).getEntries().isEmpty());
    assertTrue(fileSys.getAclStatus(file2).getEntries().isEmpty());
  } finally {
    IOUtils.cleanup(LOG, fileSys);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 61, Source: TestEditLog.java


Note: The org.apache.hadoop.hdfs.MiniDFSCluster.waitActive examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.