

Java HTableDescriptor.setRegionReplication Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.setRegionReplication. If you are wondering what HTableDescriptor.setRegionReplication does, how to use it, or where to find concrete examples, the curated code examples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The following sections present 15 code examples of HTableDescriptor.setRegionReplication, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
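Before the detailed examples, here is a minimal, self-contained sketch of the pattern most of them share: set the replica count on the HTableDescriptor before creating the table, then read with TIMELINE consistency so that a secondary replica may serve the request. This sketch is illustrative only; it assumes an HBase 1.x client and a reachable cluster, and the table name, column family, and row key are hypothetical.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionReplicaSketch {
  public static void main(String[] args) throws IOException {
    TableName tableName = TableName.valueOf("demoTable"); // hypothetical table name
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("f")); // hypothetical column family
      // One primary region plus two read-only replicas per region.
      htd.setRegionReplication(3);
      admin.createTable(htd);

      try (Table table = connection.getTable(tableName)) {
        Get get = new Get(Bytes.toBytes("row1")); // hypothetical row key
        // TIMELINE consistency allows any replica to answer; the result may be
        // stale, which Result.isStale() reports.
        get.setConsistency(Consistency.TIMELINE);
        Result result = table.get(get);
        System.out.println("stale read? " + result.isStale());
      }
    }
  }
}

Example 9 below demonstrates the same TIMELINE read against a live mini-cluster: a read answered by the primary reports isStale() == false, while one answered by a replica reports stale data.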

Example 1: createTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
public void createTable() throws Exception {

		HColumnDescriptor family1 = new HColumnDescriptor(firstFamily);
		HColumnDescriptor family2 = new HColumnDescriptor(secondFamily);
		family1.setMaxVersions(3);
		family2.setMaxVersions(3);

		HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(nameSpaceName + ":" + tableName));
		descriptor.addFamily(family1);
		descriptor.addFamily(family2);
		descriptor.setRegionReplication(3); // one primary region plus two read-only replicas
		admin.createTable(descriptor);
		// admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("10"));
		// admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("20"));
		// admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("30"));
		// admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("40"));
		// admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("50"));
		// admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("60"));
	}
 
Developer ID: husky00, Project: worm, Lines: 26, Source: StoreToHbase.java

Example 2: testCreateTableWithSingleReplica

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test
public void testCreateTableWithSingleReplica() throws Exception {
  final int numRegions = 3;
  final int numReplica = 1;
  final TableName table = TableName.valueOf("singleReplicaTable");
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.setRegionReplication(numReplica);
    desc.addFamily(new HColumnDescriptor("family"));
    ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);

    validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection());
    List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(TEST_UTIL.getZooKeeperWatcher(),
      ADMIN.getConnection(), table);
    assert(hris.size() == numRegions * numReplica);
  } finally {
    ADMIN.disableTable(table);
    ADMIN.deleteTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: TestMasterOperationsForRegionReplicas.java

Example 3: setupTableWithRegionReplica

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Set up a clean table with the given region replica count.
 *
 * Sets tbl, which must be closed after the test.
 *
 * @param tablename
 * @param replicaCount
 * @throws Exception
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  tbl.put(puts);
  tbl.flushCommits();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: TestHBaseFsck.java

Example 4: testRegionReplicaReplicationPeerIsCreated

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test
public void testRegionReplicaReplicationPeerIsCreated() throws IOException, ReplicationException {
  // create a table with region replicas. Check whether the replication peer is created
  // and replication started.
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  String peerId = "region_replica_replication";

  if (admin.getPeerConfig(peerId) != null) {
    admin.removePeer(peerId);
  }

  HTableDescriptor htd = HTU.createTableDescriptor(
    "testReplicationPeerIsCreated_no_region_replicas");
  HTU.getHBaseAdmin().createTable(htd);
  ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
  assertNull(peerConfig);

  htd = HTU.createTableDescriptor("testReplicationPeerIsCreated");
  htd.setRegionReplication(2);
  HTU.getHBaseAdmin().createTable(htd);

  // assert peer configuration is correct
  peerConfig = admin.getPeerConfig(peerId);
  assertNotNull(peerConfig);
  assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey(
      HTU.getConfiguration()));
  assertEquals(peerConfig.getReplicationEndpointImpl(),
    RegionReplicaReplicationEndpoint.class.getName());
  admin.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestRegionReplicaReplicationEndpoint.java

Example 5: testRegionReplicaReplicationPeerIsCreatedForModifyTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test (timeout=240000)
public void testRegionReplicaReplicationPeerIsCreatedForModifyTable() throws Exception {
  // modify a table by adding region replicas. Check whether the replication peer is created
  // and replication started.
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  String peerId = "region_replica_replication";

  if (admin.getPeerConfig(peerId) != null) {
    admin.removePeer(peerId);
  }

  HTableDescriptor htd
    = HTU.createTableDescriptor("testRegionReplicaReplicationPeerIsCreatedForModifyTable");
  HTU.getHBaseAdmin().createTable(htd);

  // assert that replication peer is not created yet
  ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
  assertNull(peerConfig);

  HTU.getHBaseAdmin().disableTable(htd.getTableName());
  htd.setRegionReplication(2);
  HTU.getHBaseAdmin().modifyTable(htd.getTableName(), htd);
  HTU.getHBaseAdmin().enableTable(htd.getTableName());

  // assert peer configuration is correct
  peerConfig = admin.getPeerConfig(peerId);
  assertNotNull(peerConfig);
  assertEquals(peerConfig.getClusterKey(), ZKConfig.getZooKeeperClusterKey(
      HTU.getConfiguration()));
  assertEquals(peerConfig.getReplicationEndpointImpl(),
    RegionReplicaReplicationEndpoint.class.getName());
  admin.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestRegionReplicaReplicationEndpoint.java

Example 6: testRegionReplicaReplication

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
public void testRegionReplicaReplication(int regionReplication) throws Exception {
  // Test region replica replication: create a table with a single region, write some data,
  // and ensure that the data is replicated to the secondary region.
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationWithReplicas_"
      + regionReplication);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);
  TableName tableNameNoReplicas =
      TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
  HTU.deleteTableIfAny(tableNameNoReplicas);
  HTU.createTable(tableNameNoReplicas, HBaseTestingUtility.fam1);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableNoReplicas = connection.getTable(tableNameNoReplicas);

  try {
    // load some data to the non-replicated table
    HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtility.fam1, 6000, 7000);

    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    verifyReplication(tableName, regionReplication, 0, 1000);

  } finally {
    table.close();
    tableNoReplicas.close();
    HTU.deleteTableIfAny(tableNameNoReplicas);
    connection.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestRegionReplicaReplicationEndpoint.java

Example 7: testRegionReplicaWithoutMemstoreReplication

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test (timeout = 240000)
public void testRegionReplicaWithoutMemstoreReplication() throws Exception {
  int regionReplication = 3;
  TableName tableName = TableName.valueOf("testRegionReplicaWithoutMemstoreReplication");
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  htd.setRegionMemstoreReplication(false);
  HTU.getHBaseAdmin().createTable(htd);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  try {
    // write data to the primary. The replicas should not receive the data
    final int STEP = 100;
    for (int i = 0; i < 3; ++i) {
      final int startRow = i * STEP;
      final int endRow = (i + 1) * STEP;
      LOG.info("Writing data from " + startRow + " to " + endRow);
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, startRow, endRow);
      verifyReplication(tableName, regionReplication, startRow, endRow, false);

      // Flush the table, now the data should show up in the replicas
      LOG.info("flushing table");
      HTU.flush(tableName);
      verifyReplication(tableName, regionReplication, 0, endRow, true);
    }
  } finally {
    table.close();
    connection.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 32, Source: TestRegionReplicaReplicationEndpoint.java

Example 8: testRegionReplicaReplicationForFlushAndCompaction

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test (timeout = 240000)
public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception {
  // Tests a table with region replication 3. Writes some data, and causes flushes and
  // compactions. Verifies that the data is readable from the replicas. Note that this
  // does not test whether the replicas actually pick up flushed files and apply compaction
  // to their stores
  int regionReplication = 3;
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationForFlushAndCompaction");
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);

  try {
    // load the data to the table

    for (int i = 0; i < 6000; i += 1000) {
      LOG.info("Writing data from " + i + " to " + (i+1000));
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, i, i+1000);
      LOG.info("flushing table");
      HTU.flush(tableName);
      LOG.info("compacting table");
      HTU.compact(tableName, false);
    }

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    connection.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestRegionReplicaReplicationEndpoint.java

Example 9: testCreateDeleteTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test (timeout=30000)
public void testCreateDeleteTable() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][]{f}, HTU.getConfiguration());

  Put p = new Put(row);
  p.add(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    // But if we ask for stale we will get it
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
    SlowMeCopro.cdl.get().countDown();
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 33, Source: TestReplicaWithCluster.java

Example 10: testRollbackAndDoubleExecution

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout=90000)
public void testRollbackAndDoubleExecution() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");

  // create the table
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  // Start the Create procedure && kill the executor
  final byte[][] splitKeys = new byte[][] {
    Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
  };
  HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
  htd.setRegionReplication(3);
  HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
  long procId = procExec.submitProcedure(
    new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);

  // NOTE: the 4 (number of CreateTableState steps) is hardcoded,
  //       so you have to look at this test at least once when you add a new step.
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
      procExec, procId, 4, CreateTableState.values());

  MasterProcedureTestingUtility.validateTableDeletion(
    UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");

  // are we able to create the table after a rollback?
  resetProcExecutorTestingKillFlag();
  testSimpleCreate(tableName, splitKeys);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 31, Source: TestCreateTableProcedure.java

Example 11: testModifyTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout=60000)
public void testModifyTable() throws Exception {
  final TableName tableName = TableName.valueOf("testModifyTable");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "cf");
  UTIL.getHBaseAdmin().disableTable(tableName);

  // Modify the table descriptor
  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));

  // Test 1: modify one tracked property (max file size), plus the region replication
  long newMaxFileSize = htd.getMaxFileSize() * 2;
  htd.setMaxFileSize(newMaxFileSize);
  htd.setRegionReplication(3);

  long procId1 = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));

  HTableDescriptor currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newMaxFileSize, currentHtd.getMaxFileSize());

  // Test 2: Modify multiple properties
  boolean newReadOnlyOption = !htd.isReadOnly();
  long newMemStoreFlushSize = htd.getMemStoreFlushSize() * 2;
  htd.setReadOnly(newReadOnlyOption);
  htd.setMemStoreFlushSize(newMemStoreFlushSize);

  long procId2 = ProcedureTestingUtility.submitAndWait(
      procExec, new ModifyTableProcedure(procExec.getEnvironment(), htd));
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));

  currentHtd = UTIL.getHBaseAdmin().getTableDescriptor(tableName);
  assertEquals(newReadOnlyOption, currentHtd.isReadOnly());
  assertEquals(newMemStoreFlushSize, currentHtd.getMemStoreFlushSize());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 38, Source: TestModifyTableProcedure.java

Example 12: testRollbackAndDoubleExecutionOffline

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionOffline() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
  final String familyName = "cf2";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, "cf1");
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  boolean newCompactionEnableOption = !htd.isCompactionEnabled();
  htd.setCompactionEnabled(newCompactionEnableOption);
  htd.addFamily(new HColumnDescriptor(familyName));
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Restart the executor and rollback the step twice
  int numberOfSteps = ModifyTableState.values().length - 4; // failing in the middle of proc
  MasterProcedureTestingUtility.testRollbackAndDoubleExecution(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // cf2 should not be present
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, "cf1");
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 37, Source: TestModifyTableProcedure.java

Example 13: testRollbackAndDoubleExecutionAfterPONR

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test(timeout = 60000)
public void testRollbackAndDoubleExecutionAfterPONR() throws Exception {
  final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecutionAfterPONR");
  final String familyToAddName = "cf2";
  final String familyToRemove = "cf1";
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();

  // create the table
  HRegionInfo[] regions = MasterProcedureTestingUtility.createTable(
    procExec, tableName, null, familyToRemove);
  UTIL.getHBaseAdmin().disableTable(tableName);

  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

  HTableDescriptor htd = new HTableDescriptor(UTIL.getHBaseAdmin().getTableDescriptor(tableName));
  htd.setCompactionEnabled(!htd.isCompactionEnabled());
  htd.addFamily(new HColumnDescriptor(familyToAddName));
  htd.removeFamily(familyToRemove.getBytes());
  htd.setRegionReplication(3);

  // Start the Modify procedure && kill the executor
  long procId = procExec.submitProcedure(
    new ModifyTableProcedure(procExec.getEnvironment(), htd), nonceGroup, nonce);

  // Failing after MODIFY_TABLE_DELETE_FS_LAYOUT we should not trigger the rollback.
  // NOTE: the 5 (number of MODIFY_TABLE_DELETE_FS_LAYOUT + 1 step) is hardcoded,
  //       so you have to look at this test at least once when you add a new step.
  int numberOfSteps = 5;
  MasterProcedureTestingUtility.testRollbackAndDoubleExecutionAfterPONR(
    procExec,
    procId,
    numberOfSteps,
    ModifyTableState.values());

  // "cf2" should be added and "cf1" should be removed
  MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
    tableName, regions, false, familyToAddName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 40, Source: TestModifyTableProcedure.java

Example 14: createTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
public static void createTable(final HBaseTestingUtility util, final TableName tableName,
    int regionReplication, final byte[]... families) throws IOException, InterruptedException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.setRegionReplication(regionReplication);
  for (byte[] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    htd.addFamily(hcd);
  }
  byte[][] splitKeys = getSplitKeys();
  util.createTable(htd, splitKeys);
  assertEquals((splitKeys.length + 1) * regionReplication,
      util.getHBaseAdmin().getTableRegions(tableName).size());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 14, Source: SnapshotTestingUtils.java

Example 15: testRegionReplicaReplicationIgnoresDisabledTables

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable)
    throws Exception {
  // Tests that edits from a disabled or dropped table are handled correctly by skipping
  // those entries, and that edits arriving after them can still be replicated without
  // problems.
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationIgnoresDisabledTables"
    + dropTable);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  int regionReplication = 3;
  htd.setRegionReplication(regionReplication);
  HTU.deleteTableIfAny(tableName);
  HTU.getHBaseAdmin().createTable(htd);
  TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
  HTU.deleteTableIfAny(toBeDisabledTable);
  htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);

  // both tables are created, now pause replication
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

  // now that the replication is disabled, write to the table to be dropped, then drop the table.

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableToBeDisabled = connection.getTable(toBeDisabledTable);

  HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);

  AtomicLong skippedEdits = new AtomicLong();
  RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink =
      mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
  when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
  RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter =
      new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink,
        (ClusterConnection) connection,
        Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
  RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
  HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
  byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();

  Entry entry = new Entry(
    new WALKey(encodedRegionName, toBeDisabledTable, 1),
    new WALEdit());

  HTU.getHBaseAdmin().disableTable(toBeDisabledTable); // disable the table
  if (dropTable) {
    HTU.getHBaseAdmin().deleteTable(toBeDisabledTable);
  }

  sinkWriter.append(toBeDisabledTable, encodedRegionName,
    HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));

  assertEquals(2, skippedEdits.get());

  try {
    // load some data to the to-be-dropped table

    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    // now enable the replication
    admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

    verifyReplication(tableName, regionReplication, 0, 1000);

  } finally {
    admin.close();
    table.close();
    rl.close();
    tableToBeDisabled.close();
    HTU.deleteTableIfAny(toBeDisabledTable);
    connection.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 77, Source: TestRegionReplicaReplicationEndpoint.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.setRegionReplication examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by the community, and copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.