

Java HBaseTestingUtility.shutdownMiniCluster Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster. If you are asking: what exactly does HBaseTestingUtility.shutdownMiniCluster do, how is it used, or where can I find examples? Then the curated code samples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.HBaseTestingUtility.


The following presents 9 code examples of HBaseTestingUtility.shutdownMiniCluster, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
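Before diving into the examples, note the canonical pattern: shutdownMiniCluster is the teardown counterpart of startMiniCluster, usually called from an @AfterClass method (or a finally block) so the mini cluster and its underlying DFS/ZooKeeper clusters are always stopped. A minimal sketch of that pattern (the class and test names here are hypothetical, not taken from the examples below):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleMiniClusterTest {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Starts an in-process HBase cluster (plus mini DFS and ZooKeeper).
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Stops the mini HBase cluster and the underlying DFS/ZK clusters.
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // ... exercise the cluster via TEST_UTIL.createTable(), getHBaseAdmin(), etc.
  }
}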

Example 1: testHDFSLinkReadDuringRename

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestFileLink.java
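The testLinkReadDuringRename helper called above is not shown on this page. A simplified sketch of what it could look like, reusing the writeSomeData and dataVerify helpers that appear in Example 9 (the file names and data size are illustrative assumptions):

private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws Exception {
  Path originalPath = new Path(rootDir, "test.file");
  Path archivedPath = new Path(rootDir, "archived.file");

  writeSomeData(fs, originalPath, 1 << 20, (byte) 2);

  // A FileLink resolves to whichever of its candidate locations currently exists.
  List<Path> files = new ArrayList<Path>();
  files.add(originalPath);
  files.add(archivedPath);

  FileLink link = new FileLink(files);
  FSDataInputStream in = link.open(fs);
  try {
    byte[] data = new byte[8192];

    // Read a first chunk while the file is still at its original location.
    int n = in.read(data);
    dataVerify(data, n, (byte) 2);

    // Rename the file out from under the open stream...
    fs.rename(originalPath, archivedPath);

    // ...and keep reading: the link should fall back to the archived location.
    n = in.read(data);
    dataVerify(data, n, (byte) 2);
  } finally {
    in.close();
  }
}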

Example 2: testMasterFailoverBalancerPersistence

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
/**
 * Test that if the master fails, the load balancer maintains its
 * state (running or not) when the next master takes over
 *
 * @throws Exception
 */
@Test(timeout = 240000)
public void testMasterFailoverBalancerPersistence() throws Exception {
  final int NUM_MASTERS = 3;
  final int NUM_RS = 1;

  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

  assertTrue(cluster.waitForActiveAndReadyMaster());
  HMaster active = cluster.getMaster();
  // check that the balancer is on by default for the active master
  ClusterStatus clusterStatus = active.getClusterStatus();
  assertTrue(clusterStatus.isBalancerOn());

  active = killActiveAndWaitForNewActive(cluster);

  // ensure the load balancer is still running on new master
  clusterStatus = active.getClusterStatus();
  assertTrue(clusterStatus.isBalancerOn());

  // turn off the load balancer
  active.balanceSwitch(false);

  // once more, kill active master and wait for new active master to show up
  active = killActiveAndWaitForNewActive(cluster);

  // ensure the load balancer is not running on the new master
  clusterStatus = active.getClusterStatus();
  assertFalse(clusterStatus.isBalancerOn());

  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 43, Source: TestMasterFailoverBalancerPersistence.java
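The killActiveAndWaitForNewActive helper is likewise class-local and not shown. A minimal sketch of what it might do, assuming MiniHBaseCluster's stopMaster/waitOnMaster methods (the index lookup is simplified):

private HMaster killActiveAndWaitForNewActive(MiniHBaseCluster cluster) throws Exception {
  HMaster oldActive = cluster.getMaster();

  // Find the thread running the current active master and stop it.
  List<MasterThread> masterThreads = cluster.getMasterThreads();
  for (int i = 0; i < masterThreads.size(); i++) {
    if (masterThreads.get(i).getMaster().isActiveMaster()) {
      cluster.stopMaster(i);
      cluster.waitOnMaster(i);
      break;
    }
  }

  // Wait for a backup master to take over and check that it is a new instance.
  assertTrue(cluster.waitForActiveAndReadyMaster());
  HMaster newActive = cluster.getMaster();
  assertTrue(newActive != oldActive);
  return newActive;
}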

Example 3: testForCheckingIfEnableAndDisableWorksFineAfterSwitch

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch()
    throws Exception {
  final int NUM_MASTERS = 2;
  final int NUM_RS = 1;
  final int NUM_REGIONS_TO_CREATE = 4;

  // Start the cluster
  log("Starting cluster");
  Configuration conf = HBaseConfiguration.create();
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testmasterRestart", null);
  HMaster master = cluster.getMaster();

  // Create a table with regions
  TableName table = TableName.valueOf("tableRestart");
  byte[] family = Bytes.toBytes("family");
  log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
  HTable ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
  int numRegions = -1;
  try (RegionLocator r = ht.getRegionLocator()) {
    numRegions = r.getStartKeys().length;
  }
  numRegions += 1; // catalogs
  log("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  log("Disabling table\n");
  TEST_UTIL.getHBaseAdmin().disableTable(table);

  NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals(
      "The number of regions for the table tableRestart should be 0 and only "
          + "the catalog and namespace tables should be present.", 2, regions.size());

  List<MasterThread> masterThreads = cluster.getMasterThreads();
  MasterThread activeMaster = null;
  if (masterThreads.get(0).getMaster().isActiveMaster()) {
    activeMaster = masterThreads.get(0);
  } else {
    activeMaster = masterThreads.get(1);
  }
  activeMaster.getMaster().stop(
      "stopping the active master so that the backup can become active");
  cluster.hbaseCluster.waitOnMaster(activeMaster);
  cluster.waitForActiveAndReadyMaster();

  assertTrue("The table should not be in enabled state", cluster.getMaster()
      .getAssignmentManager().getTableStateManager().isTableState(
      TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED,
      ZooKeeperProtos.Table.State.DISABLING));
  log("Enabling table\n");
  // Need a new Admin, the previous one is on the old master
  Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  admin.enableTable(table);
  admin.close();
  log("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  log("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals("The assigned regions were not onlined after master"
      + " switch except for the catalog and namespace tables.",
        6, regions.size());
  assertTrue("The table should be in enabled state", cluster.getMaster()
      .getAssignmentManager().getTableStateManager()
      .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
  ht.close();
  TEST_UTIL.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 73, Source: TestMasterRestartAfterDisablingTable.java
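The log and blockUntilNoRIT helpers used above are also class-local. A plausible sketch, assuming the ZKAssign.blockUntilNoRIT call that also appears in Example 4 (the 60-second timeout is an illustrative assumption):

private void log(String msg) {
  LOG.info(msg);
}

private void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master) throws Exception {
  // Wait until ZooKeeper reports no regions in transition...
  ZKAssign.blockUntilNoRIT(zkw);
  // ...and the master's assignment manager agrees.
  master.getAssignmentManager().waitUntilNoRegionsInTransition(60000);
}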

Example 4: testShouldCheckMasterFailOverWhenMETAIsInOpenedState

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test (timeout=180000)
public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
    throws Exception {
  LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;

  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setInt("hbase.master.info.port", -1);
  conf.setBoolean("hbase.assignment.usezk", true);

  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

  // Find regionserver carrying meta.
  List<RegionServerThread> regionServerThreads =
    cluster.getRegionServerThreads();
  Region metaRegion = null;
  HRegionServer metaRegionServer = null;
  for (RegionServerThread regionServerThread : regionServerThreads) {
    HRegionServer regionServer = regionServerThread.getRegionServer();
    metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    regionServer.abort("");
    if (null != metaRegion) {
      metaRegionServer = regionServer;
      break;
    }
  }

  TEST_UTIL.shutdownMiniHBaseCluster();

  // Create a ZKW to use in the test
  ZooKeeperWatcher zkw =
    HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
        metaRegion, metaRegionServer.getServerName());

  LOG.info("Staring cluster for second time");
  TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);

  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  while (!master.isInitialized()) {
    Thread.sleep(100);
  }
  // Failover should be completed, now wait for no RIT
  log("Waiting for no more RIT");
  ZKAssign.blockUntilNoRIT(zkw);

  zkw.close();
  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestMasterFailover.java

Example 5: testMasterShutdown

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
/**
 * Simple test of shutdown.
 * <p>
 * Starts with three masters. Tells the active master to shut down the cluster.
 * Verifies that all masters are properly shutdown.
 * @throws Exception
 */
@Test (timeout=120000)
public void testMasterShutdown() throws Exception {
  final int NUM_MASTERS = 3;
  final int NUM_RS = 3;

  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();

  // Start the cluster
  HBaseTestingUtility htu = new HBaseTestingUtility(conf);
  htu.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = htu.getHBaseCluster();

  // get all the master threads
  List<MasterThread> masterThreads = cluster.getMasterThreads();

  // wait for each to come online
  for (MasterThread mt : masterThreads) {
    assertTrue(mt.isAlive());
  }

  // find the active master
  HMaster active = null;
  for (int i = 0; i < masterThreads.size(); i++) {
    if (masterThreads.get(i).getMaster().isActiveMaster()) {
      active = masterThreads.get(i).getMaster();
      break;
    }
  }
  assertNotNull(active);
  // make sure the other two are backup masters
  ClusterStatus status = active.getClusterStatus();
  assertEquals(2, status.getBackupMastersSize());
  assertEquals(2, status.getBackupMasters().size());

  // tell the active master to shutdown the cluster
  active.shutdown();

  for (int i = NUM_MASTERS - 1; i >= 0 ;--i) {
    cluster.waitOnMaster(i);
  }
  // make sure all the masters properly shutdown
  assertEquals(0, masterThreads.size());

  htu.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestMasterShutdown.java

Example 6: testRSTermnationAfterRegisteringToMasterBeforeCreatingEphemeralNod

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
/**
 * Test verifies whether a region server is removed from the master's online servers list
 * if it goes down after registering with the master.
 * @throws Exception
 */
@Test(timeout = 180000)
public void testRSTermnationAfterRegisteringToMasterBeforeCreatingEphemeralNod() throws Exception {

  final int NUM_MASTERS = 1;
  final int NUM_RS = 2;
  firstRS.set(true);
  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);

  // Start the cluster
  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniDFSCluster(3);
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.createRootDir();
  final LocalHBaseCluster cluster =
      new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class, MockedRegionServer.class);
  final MasterThread master = cluster.getMasters().get(0);
  master.start();
  try {
    long startTime = System.currentTimeMillis();
    while (!master.getMaster().isActiveMaster()) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ignored) {
      }
      if (System.currentTimeMillis() > startTime + 30000) {
        throw new RuntimeException("Master not active after 30 seconds");
      }
    }
    masterActive = true;
    cluster.getRegionServers().get(0).start();
    cluster.getRegionServers().get(1).start();
    Thread.sleep(10000);
    List<ServerName> onlineServersList =
        master.getMaster().getServerManager().getOnlineServersList();
    while (onlineServersList.size() > 1) {
      Thread.sleep(100);
      onlineServersList = master.getMaster().getServerManager().getOnlineServersList();
    }
    assertEquals(1, onlineServersList.size());
    cluster.shutdown();
  } finally {
    masterActive = false;
    firstRS.set(true);
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestRSKilledWhenInitializing.java

Example 7: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file into more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have total of 4 replica of blocks on 3 datanodes; thus there
    // must be at least one host that have replica for 2 HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 72, Source: TestHRegion.java

Example 8: testJoinedScanners

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test
public void testJoinedScanners() throws Exception {
  String[] dataNodeHosts = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  HBaseTestingUtility htu = new HBaseTestingUtility();

  final int DEFAULT_BLOCK_SIZE = 1024*1024;
  htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 1);
  htu.getConfiguration().setLong("hbase.hregion.max.filesize", 322122547200L);
  MiniHBaseCluster cluster = null;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte [][] families = {cf_essential, cf_joined};

    TableName tableName = TableName.valueOf(this.getClass().getSimpleName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    for(byte[] family : families) {
      HColumnDescriptor hcd = new HColumnDescriptor(family);
      hcd.setDataBlockEncoding(blockEncoding);
      desc.addFamily(hcd);
    }
    htu.getHBaseAdmin().createTable(desc);
    Table ht = new HTable(htu.getConfiguration(), tableName);

    long rows_to_insert = 1000;
    int insert_batch = 20;
    long time = System.nanoTime();
    Random rand = new Random(time);

    LOG.info("Make " + Long.toString(rows_to_insert) + " rows, total size = "
      + Float.toString(rows_to_insert * valueWidth / 1024 / 1024) + " MB");

    byte [] val_large = new byte[valueWidth];

    List<Put> puts = new ArrayList<Put>();

    for (long i = 0; i < rows_to_insert; i++) {
      Put put = new Put(Bytes.toBytes(Long.toString (i)));
      if (rand.nextInt(100) <= selectionRatio) {
        put.add(cf_essential, col_name, flag_yes);
      } else {
        put.add(cf_essential, col_name, flag_no);
      }
      put.add(cf_joined, col_name, val_large);
      puts.add(put);
      if (puts.size() >= insert_batch) {
        ht.put(puts);
        puts.clear();
      }
    }
    if (puts.size() > 0) {
      ht.put(puts);
      puts.clear();
    }

    LOG.info("Data generated in "
      + Double.toString((System.nanoTime() - time) / 1000000000.0) + " seconds");

    boolean slow = true;
    for (int i = 0; i < 10; ++i) {
      runScanner(ht, slow);
      slow = !slow;
    }

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 75, Source: TestJoinedScanners.java
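The runScanner helper, which is the point of this test, is not shown. A sketch of the joined-scanner pattern it likely exercises: a SingleColumnValueFilter on the small essential family, with Scan.setLoadColumnFamiliesOnDemand controlling whether the large joined family is loaded lazily (the timing and log format are illustrative assumptions):

private void runScanner(Table table, boolean slow) throws Exception {
  long time = System.nanoTime();
  Scan scan = new Scan();
  scan.addColumn(cf_essential, col_name);
  scan.addColumn(cf_joined, col_name);

  // Keep only rows whose essential-family flag equals flag_yes.
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      cf_essential, col_name, CompareFilter.CompareOp.EQUAL, flag_yes);
  filter.setFilterIfMissing(true);
  scan.setFilter(filter);

  // Joined (fast) mode defers loading cf_joined until the filter has
  // accepted the row; slow mode loads both families up front.
  scan.setLoadColumnFamiliesOnDemand(!slow);

  long rowCount = 0;
  ResultScanner scanner = table.getScanner(scan);
  try {
    while (scanner.next() != null) {
      rowCount++;
    }
  } finally {
    scanner.close();
  }
  LOG.info((slow ? "Slow" : "Joined") + " scan took "
      + Double.toString((System.nanoTime() - time) / 1000000000.0)
      + " seconds, returned " + Long.toString(rowCount) + " rows");
}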

Example 9: testHDFSLinkReadDuringDelete

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a read goes beyond the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<Path>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 70, Source: TestFileLink.java


Note: The org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.