當前位置: 首頁>>代碼示例>>Java>>正文


Java HRegionInfo.setOffline方法代碼示例

本文整理匯總了Java中org.apache.hadoop.hbase.HRegionInfo.setOffline方法的典型用法代碼示例。如果您正苦於以下問題:Java HRegionInfo.setOffline方法的具體用法?Java HRegionInfo.setOffline怎麽用?Java HRegionInfo.setOffline使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.hbase.HRegionInfo的用法示例。


在下文中一共展示了HRegionInfo.setOffline方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: resetSplitParent

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Reset the split parent region info in meta table
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  byte[] regionName = hi.metaEntry.getRegionName();
  RowMutations mutations = new RowMutations(regionName);

  // Drop the daughter pointers (SPLITA/SPLITB) recorded on the parent row.
  Delete d = new Delete(regionName);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);

  // Re-publish the region info with offline/split flags cleared.
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  mutations.add(MetaTableAccessor.makePutFromRegionInfo(hri));

  // Delete + Put are applied atomically on the single meta row.
  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:20,代碼來源:HBaseFsck.java

示例2: cloneRegionInfo

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Clone a snapshot's region info, retargeting it at the given table while
 * preserving start/end keys, split flag, region id, and offline state.
 */
public static HRegionInfo cloneRegionInfo(TableName tableName, HRegionInfo snapshotRegionInfo) {
  byte[] startKey = snapshotRegionInfo.getStartKey();
  byte[] endKey = snapshotRegionInfo.getEndKey();
  HRegionInfo clone = new HRegionInfo(tableName, startKey, endKey,
      snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
  clone.setOffline(snapshotRegionInfo.isOffline());
  return clone;
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:8,代碼來源:RestoreSnapshotHelper.java

示例3: offlineParentInMetaAndputMetaEntries

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Mark the parent region offline+split in hbase:meta and add the two
 * daughter entries, all submitted as one multi-mutate batch.
 */
private void offlineParentInMetaAndputMetaEntries(HConnection hConnection,
    HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
    ServerName serverName, List<Mutation> metaEntries, int regionReplication)
        throws IOException {
  List<Mutation> mutations = metaEntries;

  // Parent row: flag a copy offline+split and record daughter pointers.
  HRegionInfo offlinedParent = new HRegionInfo(parent);
  offlinedParent.setOffline(true);
  offlinedParent.setSplit(true);
  Put parentPut = MetaTableAccessor.makePutFromRegionInfo(offlinedParent);
  MetaTableAccessor.addDaughtersToPut(parentPut, splitA, splitB);
  mutations.add(parentPut);

  // Daughter rows with their server location; these are new regions,
  // so openSeqNum = 1 is fine.
  Put daughterAPut = MetaTableAccessor.makePutFromRegionInfo(splitA);
  Put daughterBPut = MetaTableAccessor.makePutFromRegionInfo(splitB);
  addLocation(daughterAPut, serverName, 1);
  addLocation(daughterBPut, serverName, 1);
  mutations.add(daughterAPut);
  mutations.add(daughterBPut);

  // Add empty locations for region replicas of daughters so that number of
  // replicas can be cached whenever the primary region is looked up from meta.
  for (int replicaId = 1; replicaId < regionReplication; replicaId++) {
    addEmptyLocation(daughterAPut, replicaId);
    addEmptyLocation(daughterBPut, replicaId);
  }

  MetaTableAccessor.mutateMetaTable(hConnection, mutations);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:33,代碼來源:SplitTransactionImpl.java

示例4: testCreateTableWithSplitRegion

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Create a table whose initial region set already contains a completed split
 * (two daughters plus an offline split parent) and verify the table comes up
 * available with the parent left in SPLIT state.
 */
@Test (timeout=300000)
public void testCreateTableWithSplitRegion() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTableWithSplitRegion");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  byte[] splitPoint = Bytes.toBytes("split-point");
  long ts = System.currentTimeMillis();
  // Two daughters covering the whole keyspace, plus a parent spanning it,
  // flagged as split and offline. Distinct region ids (ts, ts+1, ts+2) keep
  // the encoded names unique.
  HRegionInfo d1 = new HRegionInfo(desc.getTableName(), null, splitPoint, false, ts);
  HRegionInfo d2 = new HRegionInfo(desc.getTableName(), splitPoint, null, false, ts + 1);
  HRegionInfo parent = new HRegionInfo(desc.getTableName(), null, null, true, ts + 2);
  parent.setOffline(true);

  Path tempdir = m.getMasterFileSystem().getTempDir();
  FileSystem fs = m.getMasterFileSystem().getFileSystem();
  Path tempTableDir = FSUtils.getTableDir(tempdir, desc.getTableName());
  fs.delete(tempTableDir, true); // Clean up temp table dir if exists

  final HRegionInfo[] hRegionInfos = new HRegionInfo[] {d1, d2, parent};
  CreateTableHandler handler = new CreateTableHandler(m, m.getMasterFileSystem(),
    desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  handler.process();
  // Poll up to ~30s (100 * 300ms) for the table to come online; the original
  // loop kept spinning through all 100 iterations even after availability —
  // break out as soon as the table is available.
  for (int i = 0; i < 100; i++) {
    if (TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName)) {
      break;
    }
    Thread.sleep(300);
  }
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName));
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName));
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName, new byte[][] { splitPoint }));
  // The pre-split parent must be tracked as SPLIT by the assignment manager.
  RegionStates regionStates = m.getAssignmentManager().getRegionStates();
  assertTrue("Parent should be in SPLIT state",
    regionStates.isRegionInState(parent, State.SPLIT));
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:37,代碼來源:TestCreateTableHandler.java

示例5: testScanDoesNotCleanRegionsWithExistingParents

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * CatalogJanitor.scan() should not clean parent regions if their own
 * parents are still referencing them. This ensures that grandfather regions
 * do not point to deleted parent regions.
 */
@Test
public void testScanDoesNotCleanRegionsWithExistingParents() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testScanDoesNotCleanRegionsWithExistingParents");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  final HTableDescriptor htd = createHTableDescriptor();

  // Create regions: aaa->{lastEndKey}, aaa->ccc, aaa->bbb, bbb->ccc, etc.

  // Parent
  HRegionInfo parent = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
    new byte[0], true);
  // Sleep a second else the encoded name on these regions comes out
  // same for all with same start key and made in same second.
  Thread.sleep(1001);

  // Daughter a
  HRegionInfo splita = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
    Bytes.toBytes("ccc"), true);
  Thread.sleep(1001);
  // Make daughters of daughter a; splitaa and splitab.
  HRegionInfo splitaa = new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
    Bytes.toBytes("bbb"), false);
  HRegionInfo splitab = new HRegionInfo(htd.getTableName(), Bytes.toBytes("bbb"),
    Bytes.toBytes("ccc"), false);

  // Daughter b
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
      new byte[0]);
  Thread.sleep(1001);

  // Build the fixed "meta view" the janitor will see: parent with daughters
  // splita/splitb, and splita with daughters splitaa/splitab. The comparator
  // orders parents ahead of their daughters.
  final Map<HRegionInfo, Result> splitParents =
      new TreeMap<HRegionInfo, Result>(new SplitParentFirstComparator());
  splitParents.put(parent, createResult(parent, splita, splitb));
  splita.setOffline(true); //simulate that splita goes offline when it is split
  splitParents.put(splita, createResult(splita, splitaa,splitab));

  final Map<HRegionInfo, Result> mergedRegions = new TreeMap<HRegionInfo, Result>();
  // Spy so the janitor scans exactly the fabricated maps above, not real meta.
  CatalogJanitor janitor = spy(new CatalogJanitor(server, services));
  doReturn(new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
          10, mergedRegions, splitParents)).when(janitor)
      .getMergedRegionsAndSplitParents();

  //create ref from splita to parent
  Path splitaRef =
      createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false);

  //parent and A should not be removed
  assertEquals(0, janitor.scan());

  //now delete the ref
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  assertTrue(fs.delete(splitaRef, true));

  //now, both parent, and splita can be deleted
  assertEquals(2, janitor.scan());

  services.stop("test finished");
  janitor.cancel(true);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:68,代碼來源:TestCatalogJanitor.java

示例6: testLingeringSplitParent

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * A split parent in meta, in hdfs, and not deployed.
 * Verifies hbck reports LINGERING_SPLIT_PARENT, that a regular repair cannot
 * fix it, and that -fixSplitParents resets the parent back to a normal region.
 */
@Test (timeout=180000)
public void testLingeringSplitParent() throws Exception {
  TableName table =
      TableName.valueOf("testLingeringSplitParent");
  Table meta = null;
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());

    // make sure data in regions, if in wal only there is no data loss
    admin.flush(table);
    HRegionLocation location = tbl.getRegionLocation("B");

    // Delete one region from meta, but not hdfs, unassign it.
    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"),
      Bytes.toBytes("C"), true, true, false);

    // Create a new meta entry to fake it as a split parent.
    meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
    HRegionInfo hri = location.getRegionInfo();

    // Fabricate two daughters splitting [B, C) at "BM".
    HRegionInfo a = new HRegionInfo(tbl.getName(),
      Bytes.toBytes("B"), Bytes.toBytes("BM"));
    HRegionInfo b = new HRegionInfo(tbl.getName(),
      Bytes.toBytes("BM"), Bytes.toBytes("C"));

    // Flag the deleted region as a completed split parent.
    hri.setOffline(true);
    hri.setSplit(true);

    MetaTableAccessor.addRegionToMeta(meta, hri, a, b);
    meta.close();
    admin.flush(TableName.META_TABLE_NAME);

    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] {
      ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN});

    // regular repair cannot fix lingering split parent
    hbck = doFsck(conf, true);
    assertErrors(hbck, new ERROR_CODE[] {
      ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN });
    assertFalse(hbck.shouldRerun());
    hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] {
      ERROR_CODE.LINGERING_SPLIT_PARENT, ERROR_CODE.HOLE_IN_REGION_CHAIN});

    // fix lingering split parent
    hbck = new HBaseFsck(conf, hbfsckExecutorService);
    hbck.connect();
    hbck.setDisplayFullReport(); // i.e. -details
    hbck.setTimeLag(0);
    hbck.setFixSplitParents(true);
    hbck.onlineHbck();
    assertTrue(hbck.shouldRerun());
    hbck.close();

    // The fix must have removed the daughter pointers from the parent row.
    Get get = new Get(hri.getRegionName());
    Result result = meta.get(get);
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,
      HConstants.SPLITA_QUALIFIER).isEmpty());
    assertTrue(result.getColumnCells(HConstants.CATALOG_FAMILY,
      HConstants.SPLITB_QUALIFIER).isEmpty());
    admin.flush(TableName.META_TABLE_NAME);

    // fix other issues
    doFsck(conf, true);

    // check that all are fixed
    assertNoErrors(doFsck(conf, false));
    assertEquals(ROWKEYS.length, countRows());
  } finally {
    cleanupTable(table);
    IOUtils.closeQuietly(meta);
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:79,代碼來源:TestHBaseFsck.java

示例7: testMasterRestartAtRegionSplitPendingCatalogJanitor

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Verifies HBASE-5806.  Here the case is that splitting is completed but before the
 * CJ could remove the parent region the master is killed and restarted.
 * @throws IOException
 * @throws InterruptedException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test (timeout = 300000)
public void testMasterRestartAtRegionSplitPendingCatalogJanitor()
    throws IOException, InterruptedException, NodeExistsException,
    KeeperException, ServiceException {
  final TableName tableName = TableName
      .valueOf("testMasterRestartAtRegionSplitPendingCatalogJanitor");

  // Create table then get the single region for our new table.
  HTable t = createTableAndWait(tableName, HConstants.CATALOG_FAMILY);
  List<HRegion> regions = cluster.getRegions(tableName);
  HRegionInfo hri = getAndCheckSingleTableRegion(regions);

  int tableRegionIndex = ensureTableRegionNotOnSameServerAsMeta(admin, hri);

  // Turn off balancer so it doesn't cut in and mess up our placements.
  this.admin.setBalancerRunning(false, true);
  // Turn off the meta scanner so it don't remove parent on us.
  cluster.getMaster().setCatalogJanitorEnabled(false);
  ZooKeeperWatcher zkw = new ZooKeeperWatcher(t.getConfiguration(),
    "testMasterRestartAtRegionSplitPendingCatalogJanitor", new UselessTestAbortable());
  try {
    // Add a bit of load up into the table so splittable.
    TESTING_UTIL.loadTable(t, HConstants.CATALOG_FAMILY, false);
    // Get region pre-split.
    HRegionServer server = cluster.getRegionServer(tableRegionIndex);
    printOutRegions(server, "Initial regions: ");

    this.admin.split(hri.getRegionNameAsString());
    checkAndGetDaughters(tableName);
    // Assert the ephemeral node is up in zk. The original computed the same
    // node path twice ('path' and 'node'); compute it once and reuse it.
    String path = ZKAssign.getNodeName(zkw, hri.getEncodedName());
    Stat stats = zkw.getRecoverableZooKeeper().exists(path, false);
    LOG.info("EPHEMERAL NODE BEFORE SERVER ABORT, path=" + path + ", stats="
        + stats);
    // Wait up to 60s for the split transaction's ephemeral znode to go away.
    Stat stat = new Stat();
    byte[] data = ZKUtil.getDataNoWatch(zkw, path, stat);
    for (int i=0; data != null && i<60; i++) {
      Thread.sleep(1000);
      data = ZKUtil.getDataNoWatch(zkw, path, stat);
    }
    assertNull("Waited too long for ZK node to be removed: "+path, data);

    // Kill the master while the parent is still pending catalog-janitor
    // cleanup, then wait for a restarted master without a janitor.
    MockMasterWithoutCatalogJanitor master = abortAndWaitForMaster();

    this.admin = new HBaseAdmin(TESTING_UTIL.getConfiguration());

    // Update the region to be offline and split, so that HRegionInfo#equals
    // returns true in checking rebuilt region states map.
    hri.setOffline(true);
    hri.setSplit(true);
    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
    assertTrue("Split parent should be in SPLIT state",
      regionStates.isRegionInState(hri, State.SPLIT));
    // A completed split parent must not be assigned to any region server.
    ServerName regionServerOfRegion = regionStates.getRegionServerOfRegion(hri);
    assertTrue("Split parent should not be assigned to any server",
      regionServerOfRegion == null);
  } finally {
    this.admin.setBalancerRunning(true, false);
    cluster.getMaster().setCatalogJanitorEnabled(true);
    t.close();
    zkw.close();
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:73,代碼來源:TestSplitTransactionOnCluster.java

示例8: preSplitBeforePONR

import org.apache.hadoop.hbase.HRegionInfo; //導入方法依賴的package包/類
/**
 * Coprocessor hook run before the split's point-of-no-return: drives the
 * split steps itself and appends the parent/daughter meta mutations.
 */
@Override
public void preSplitBeforePONR(ObserverContext<RegionCoprocessorEnvironment> ctx,
    byte[] splitKey, List<Mutation> metaEntries) throws IOException {
  HRegionServer rs =
      (HRegionServer) ctx.getEnvironment().getRegionServerServices();
  List<Region> onlineRegions =
      rs.getOnlineRegions(TableName.valueOf("testSplitHooksBeforeAndAfterPONR_2"));
  // Pick the online region that covers the split key; default to the first.
  Region region = onlineRegions.get(0);
  for (Region candidate : onlineRegions) {
    if (candidate.getRegionInfo().containsRow(splitKey)) {
      region = candidate;
      break;
    }
  }
  st = new SplitTransactionImpl((HRegion) region, splitKey);
  if (!st.prepare()) {
    LOG.error("Prepare for the table " + region.getTableDesc().getNameAsString()
        + " failed. So returning null. ");
    ctx.bypass();
    return;
  }
  ((HRegion) region).forceSplit(splitKey);
  daughterRegions = st.stepsBeforePONR(rs, rs, false);

  // Parent row: offline + split, with daughter pointers attached.
  HRegionInfo offlinedParent = new HRegionInfo(region.getRegionInfo());
  offlinedParent.setOffline(true);
  offlinedParent.setSplit(true);
  HRegionInfo daughterA = daughterRegions.getFirst().getRegionInfo();
  HRegionInfo daughterB = daughterRegions.getSecond().getRegionInfo();
  Put putParent = MetaTableAccessor.makePutFromRegionInfo(offlinedParent);
  MetaTableAccessor.addDaughtersToPut(putParent, daughterA, daughterB);
  metaEntries.add(putParent);

  // Daughter rows with this server's location; openSeqNum = 1 for new regions.
  Put putA = MetaTableAccessor.makePutFromRegionInfo(daughterA);
  Put putB = MetaTableAccessor.makePutFromRegionInfo(daughterB);
  st.addLocation(putA, rs.getServerName(), 1);
  st.addLocation(putB, rs.getServerName(), 1);
  metaEntries.add(putA);
  metaEntries.add(putB);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:42,代碼來源:TestSplitTransactionOnCluster.java


注:本文中的org.apache.hadoop.hbase.HRegionInfo.setOffline方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。