

Java HBaseTestingUtility.startMiniCluster Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster. If you are unsure what HBaseTestingUtility.startMiniCluster does or how to call it, the selected code examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseTestingUtility.


The following 15 code examples of HBaseTestingUtility.startMiniCluster are shown below, sorted by popularity by default.
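
Before the examples, here is a minimal lifecycle sketch (not taken from any of the examples below): start the mini cluster once per test class and shut it down afterwards with shutdownMiniCluster, as Example 9 also does. The class name is a placeholder, and the comments describe common default behavior of HBaseTestingUtility rather than anything specific to these projects.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {

  private static HBaseTestingUtility TEST_UTIL;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    // With no arguments this starts an in-process HBase cluster
    // (one master, one region server) backed by a mini DFS and ZooKeeper cluster.
    TEST_UTIL.startMiniCluster();
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Stops the cluster and cleans up the test data directories.
    TEST_UTIL.shutdownMiniCluster();
  }
}

Several of the examples pass arguments to startMiniCluster, for example startMiniCluster(3) in Example 4 or startMiniCluster(NUM_MASTERS, NUM_RS) in Example 9, to control how many region servers and masters the mini cluster runs.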

Example 1: setupBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  Configuration conf = TEST_UTIL.getConfiguration();
  // Up the handlers; this test needs more than usual.
  conf.setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
  enableSecurity(conf);
  verifyConfiguration(conf);

  // We expect 0.98 scanning semantics
  conf.setBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, false);

  TEST_UTIL.startMiniCluster();
  TEST_UTIL.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME.getName(), 50000);

  READER = User.createUserForTesting(conf, "reader", new String[0]);
  LIMITED = User.createUserForTesting(conf, "limited", new String[0]);
  DENIED = User.createUserForTesting(conf, "denied", new String[0]);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestAccessControlFilter.java

Example 2: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setBoolean(HConstants.REPLICATION_ENABLE_KEY, HConstants.REPLICATION_ENABLE_DEFAULT);
  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniCluster();
  admin = new ReplicationAdmin(conf1);

  conf2 = HBaseConfiguration.create(conf1);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2182);

  utility2 = new HBaseTestingUtility(conf2);
  utility2.startMiniCluster();

  ReplicationPeerConfig config = new ReplicationPeerConfig();
  config.setClusterKey(utility2.getClusterKey());
  admin.addPeer(peerId, config, null);

  HTableDescriptor table = new HTableDescriptor(tableName);
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);

  utility1.getHBaseAdmin().createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  utility1.waitUntilAllRegionsAssigned(tableName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestReplicationAdminWithTwoDifferentZKClusters.java

Example 3: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  Admin admin = util.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  util.createTable(tableName, new byte[][]{dummy, test});

  Table ht = new HTable(conf, tableName);
  Put p = new Put(row1);
  p.add(dummy, dummy, dummy);
  ht.put(p);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestClientOperationInterrupt.java

Example 4: setUpBefore

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Before
public void setUpBefore() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.getConfiguration().setInt("dfs.datanode.max.xceivers", 9192);
  TEST_UTIL.startMiniCluster(3);
  conf = TEST_UTIL.getConfiguration();
  this.connection = ConnectionFactory.createConnection(conf);
  assertEquals(0, TEST_UTIL.getHBaseAdmin().listTables().length);

  // setup the table
  table = TableName.valueOf(TABLE_BASE + "-" + tableIdx);
  tableIdx++;
  htbl = setupTable(table);
  populateTable(htbl);
  assertEquals(5, scanMeta());
  LOG.info("Table " + table + " has " + tableRowCount(conf, table)
      + " entries.");
  assertEquals(16, tableRowCount(conf, table));
  TEST_UTIL.getHBaseAdmin().disableTable(table);
  assertEquals(1, TEST_UTIL.getHBaseAdmin().listTables().length);
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: OfflineMetaRebuildTestCore.java

Example 5: startCluster

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void startCluster() throws Exception {
  metricsHelper = CompatibilityFactory.getInstance(MetricsAssertHelper.class);
  TEST_UTIL = new HBaseTestingUtility();
  conf = TEST_UTIL.getConfiguration();
  conf.getLong("hbase.splitlog.max.resubmit", 0);
  // Make the failure test faster
  conf.setInt("zookeeper.recovery.retry", 0);
  conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);

  TEST_UTIL.startMiniCluster(1, 2);
  cluster = TEST_UTIL.getHBaseCluster();

  cluster.waitForActiveAndReadyMaster();

  while (cluster.getLiveRegionServerThreads().size() < 2) {
    Threads.sleep(100);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestRemoveRegionMetrics.java

Example 6: setupClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setupClass() throws Exception {
  utility = new HBaseTestingUtility();
  Path dataTestDir = utility.getDataTestDir().getParent();
  int length = dataTestDir.toString().length();
  if (length > TEST_DIRECTORY_MAX_LENGTH) {
    System.err.println(TEST_DIRECTORY_INVALID_MESSAGE);
    System.err.println("Current HBase test directory: " + dataTestDir);
    throw new RuntimeException(TEST_DIRECTORY_INVALID_MESSAGE);
  }
  utility.startMiniCluster();
}
 
Developer: bakdata, Project: ignite-hbase, Lines: 13, Source: HBaseCacheStoreTest.java

Example 7: setupBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  CONF = TEST_UTIL.getConfiguration();
  CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowCountEndpoint.class.getName());

  TEST_UTIL.startMiniCluster();
  TEST_UTIL.createTable(TEST_TABLE, new byte[][]{TEST_FAMILY});
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: TestRowCountEndpoint.java

Example 8: startCluster

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void startCluster() throws Exception {
  LOG.info("Starting cluster");
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniCluster(1, 1, 1, null, MyMaster.class, null);
  cluster = TEST_UTIL.getHBaseCluster();
  LOG.info("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  master = cluster.getMaster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: TestMasterMetrics.java

Example 9: testMasterFailoverBalancerPersistence

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
/**
 * Test that if the master fails, the load balancer maintains its
 * state (running or not) when the next master takes over
 *
 * @throws Exception
 */
@Test(timeout = 240000)
public void testMasterFailoverBalancerPersistence() throws Exception {
  final int NUM_MASTERS = 3;
  final int NUM_RS = 1;

  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

  assertTrue(cluster.waitForActiveAndReadyMaster());
  HMaster active = cluster.getMaster();
  // check that the balancer is on by default for the active master
  ClusterStatus clusterStatus = active.getClusterStatus();
  assertTrue(clusterStatus.isBalancerOn());

  active = killActiveAndWaitForNewActive(cluster);

  // ensure the load balancer is still running on new master
  clusterStatus = active.getClusterStatus();
  assertTrue(clusterStatus.isBalancerOn());

  // turn off the load balancer
  active.balanceSwitch(false);

  // once more, kill active master and wait for new active master to show up
  active = killActiveAndWaitForNewActive(cluster);

  // ensure the load balancer is not running on the new master
  clusterStatus = active.getClusterStatus();
  assertFalse(clusterStatus.isBalancerOn());

  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 43, Source: TestMasterFailoverBalancerPersistence.java

Example 10: setupCluster

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setupCluster() throws Exception {
  util = new HBaseTestingUtility();
  // set the always on security provider
  UserProvider.setUserProviderForTesting(util.getConfiguration(),
    HadoopSecurityEnabledUserProviderForTesting.class);
  // setup configuration
  SecureTestUtil.enableSecurity(util.getConfiguration());

  util.startMiniCluster();

  // Wait for the ACL table to become available
  util.waitTableEnabled(AccessControlLists.ACL_TABLE_NAME);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestSecureLoadIncrementalHFilesSplitRecovery.java

Example 11: setupBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.getConfiguration().setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY,
    DummyRegionServerEndpoint.class.getName());
  TEST_UTIL.startMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: TestRegionServerCoprocessorEndpoint.java

Example 12: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: TestRegionObserverBypass.java

Example 13: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  util = new HBaseTestingUtility();
  util.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
  util.startMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 7, Source: TestConstraint.java

Example 14: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf1.setInt("hfile.format.version", 3);
  conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
  conf1.setInt("replication.source.size.capacity", 10240);
  conf1.setLong("replication.source.sleepforretries", 100);
  conf1.setInt("hbase.regionserver.maxlogs", 10);
  conf1.setLong("hbase.master.logcleaner.ttl", 10);
  conf1.setInt("zookeeper.recovery.retry", 1);
  conf1.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf1.setBoolean("dfs.support.append", true);
  conf1.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf1.setInt("replication.stats.thread.period.seconds", 5);
  conf1.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf1.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf1.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSource.class.getName());

  utility1 = new HBaseTestingUtility(conf1);
  utility1.startMiniZKCluster();
  MiniZooKeeperCluster miniZK = utility1.getZkCluster();
  // Have to reget conf1 in case zk cluster location different
  // than default
  conf1 = utility1.getConfiguration();
  replicationAdmin = new ReplicationAdmin(conf1);
  LOG.info("Setup first Zk");

  // Base conf2 on conf1 so it gets the right zk cluster.
  conf2 = HBaseConfiguration.create(conf1);
  conf2.setInt("hfile.format.version", 3);
  conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
  conf2.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  conf2.setBoolean("dfs.support.append", true);
  conf2.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf2.setStrings(HConstants.REPLICATION_CODEC_CONF_KEY, KeyValueCodecWithTags.class.getName());
  conf2.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessorForTagsAtSink.class.getName());

  utility2 = new HBaseTestingUtility(conf2);
  utility2.setZkCluster(miniZK);

  replicationAdmin.addPeer("2", utility2.getClusterKey());

  LOG.info("Setup second Zk");
  utility1.startMiniCluster(2);
  utility2.startMiniCluster(2);

  HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
  HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
  fam.setMaxVersions(3);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  try (Connection conn = ConnectionFactory.createConnection(conf1);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf2);
      Admin admin = conn.getAdmin()) {
    admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
  }
  htable1 = utility1.getConnection().getTable(TABLE_NAME);
  htable2 = utility2.getConnection().getTable(TABLE_NAME);
}
 
Developer: fengchen8086, Project: ditb, Lines: 64, Source: TestReplicationWithTags.java

Example 15: testForCheckingIfEnableAndDisableWorksFineAfterSwitch

import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch()
    throws Exception {
  final int NUM_MASTERS = 2;
  final int NUM_RS = 1;
  final int NUM_REGIONS_TO_CREATE = 4;

  // Start the cluster
  log("Starting cluster");
  Configuration conf = HBaseConfiguration.create();
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testmasterRestart", null);
  HMaster master = cluster.getMaster();

  // Create a table with regions
  TableName table = TableName.valueOf("tableRestart");
  byte[] family = Bytes.toBytes("family");
  log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
  HTable ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
  int numRegions = -1;
  try (RegionLocator r = ht.getRegionLocator()) {
    numRegions = r.getStartKeys().length;
  }
  numRegions += 1; // catalogs
  log("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  log("Disabling table\n");
  TEST_UTIL.getHBaseAdmin().disableTable(table);

  NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals(
      "The number of regions for the table tableRestart should be 0 and only"
          + "the catalog and namespace tables should be present.", 2, regions.size());

  List<MasterThread> masterThreads = cluster.getMasterThreads();
  MasterThread activeMaster = null;
  if (masterThreads.get(0).getMaster().isActiveMaster()) {
    activeMaster = masterThreads.get(0);
  } else {
    activeMaster = masterThreads.get(1);
  }
  activeMaster.getMaster().stop(
      "stopping the active master so that the backup can become active");
  cluster.hbaseCluster.waitOnMaster(activeMaster);
  cluster.waitForActiveAndReadyMaster();

  assertTrue("The table should not be in enabled state", cluster.getMaster()
      .getAssignmentManager().getTableStateManager().isTableState(
      TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED,
      ZooKeeperProtos.Table.State.DISABLING));
  log("Enabling table\n");
  // Need a new Admin, the previous one is on the old master
  Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  admin.enableTable(table);
  admin.close();
  log("Waiting for no more RIT\n");
  blockUntilNoRIT(zkw, master);
  log("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals("The assigned regions were not onlined after master"
      + " switch except for the catalog and namespace tables.",
        6, regions.size());
  assertTrue("The table should be in enabled state", cluster.getMaster()
      .getAssignmentManager().getTableStateManager()
      .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
  ht.close();
  TEST_UTIL.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 73, Source: TestMasterRestartAfterDisablingTable.java


Note: The org.apache.hadoop.hbase.HBaseTestingUtility.startMiniCluster examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.