

Java HRegion.openHRegion Method Code Examples

This article collects typical code examples for the Java method org.apache.hadoop.hbase.regionserver.HRegion.openHRegion. If you are asking how HRegion.openHRegion works, how to call it, or what real uses of it look like, the curated examples below may help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.HRegion.


The sections below present 10 code examples of the HRegion.openHRegion method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
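
Before the project examples, here is a minimal, self-contained sketch of the most common pattern: opening a region directly from the filesystem, scanning it, and closing it. This sketch is not taken from any of the projects below. The class name OpenHRegionSketch, the method scanRegionOffline, and the choice to pass a null WAL for read-only access are illustrative assumptions; the openHRegion(Configuration, FileSystem, Path, HRegionInfo, HTableDescriptor, WAL) overload itself is the same one used in Examples 6 and 10 (HBase 1.x API).

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public class OpenHRegionSketch {

  // Hypothetical helper: hri and htd are assumed to describe a region that
  // already exists on disk under rootDir.
  static void scanRegionOffline(Configuration conf, Path rootDir,
      HRegionInfo hri, HTableDescriptor htd) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    // A null WAL is assumed to be acceptable here because the region is only
    // read, never written (Example 1 below does the same for snapshot scans).
    HRegion region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null);
    try {
      RegionScanner scanner = region.getScanner(new Scan());
      try {
        List<Cell> cells = new ArrayList<Cell>();
        boolean more = true;
        while (more) {
          cells.clear();
          more = scanner.next(cells);   // fills 'cells' with the next row's cells
          if (!cells.isEmpty()) {
            System.out.println("row with " + cells.size() + " cell(s)");
          }
        }
      } finally {
        scanner.close();
      }
    } finally {
      region.close();                   // close the region and its store files
    }
  }
}

For comparison with the examples that follow: Examples 2 and 8 use the overload that re-opens an already instantiated but closed region, HRegion.openHRegion(closedRegion, reporter); Examples 3, 4, 5 and 9 use the overload that takes an HRegionInfo, HTableDescriptor, WAL and Configuration; Example 7 additionally passes the RegionServerServices and a CancelableProgressable.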

Example 1: ClientSideRegionScanner

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
    Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
        throws IOException {

  // region is immutable, set isolation level
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);

  // open region from the snapshot directory
  this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);

  // create an internal region scanner
  this.scanner = region.getScanner(scan);
  values = new ArrayList<Cell>();

  if (scanMetrics == null) {
    initScanMetrics(scan);
  } else {
    this.scanMetrics = scanMetrics;
  }
  region.startRegionOperation();
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: ClientSideRegionScanner.java

Example 2: reopenRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
    throws IOException {
  //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Region r = HRegion.openHRegion(closedRegion, null);

  // this following piece is a hack. currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. we don't really
  // start a region server here, so just manually create cphost
  // and set it to region.
  Configuration conf = TEST_UTIL.getConfiguration();
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  ((HRegion)r).setCoprocessorHost(host);

  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // we need to manually call pre- and postOpen here since the
  // above load() is not the real case for CP loading. A CP is
  // expected to be loaded by default from 1) configuration; or 2)
  // HTableDescriptor. If it's loaded after HRegion initialized,
  // the pre- and postOpen() won't be triggered automatically.
  // Here we have to call pre and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
 
Author: fengchen8086, Project: ditb, Lines: 27, Source: TestCoprocessorInterface.java

Example 3: mergeAndVerify

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
private HRegion mergeAndVerify(final String msg, final String regionName1,
  final String regionName2, final WAL log, final int upperbound)
throws Exception {
  Merge merger = new Merge(this.conf);
  LOG.info(msg);
  LOG.info("fs2=" + this.conf.get("fs.defaultFS"));
  int errCode = ToolRunner.run(this.conf, merger,
    new String[] {this.desc.getTableName().getNameAsString(), regionName1, regionName2}
  );
  assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
  HRegionInfo mergedInfo = merger.getMergedHRegionInfo();

  // Now verify that we can read all the rows from regions 0, 1
  // in the new merged region.
  HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
  verifyMerge(merged, upperbound);
  merged.close();
  LOG.info("Verified " + msg);
  return merged;
}
 
Author: fengchen8086, Project: ditb, Lines: 21, Source: TestMergeTool.java

Example 4: openMetaRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
private synchronized HRegion openMetaRegion() throws IOException {
  if (this.metaRegion != null) {
    return this.metaRegion;
  }
  this.metaRegion = HRegion.openHRegion(HRegionInfo.FIRST_META_REGIONINFO,
    descriptors.get(TableName.META_TABLE_NAME), getLog(HRegionInfo.FIRST_META_REGIONINFO),
    this.conf);
  this.metaRegion.compactStores();
  return this.metaRegion;
}
 
Author: fengchen8086, Project: ditb, Lines: 11, Source: MetaUtils.java

Example 5: merge

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
private HRegion merge(final HTableDescriptor htd, HRegion meta,
                      HRegionInfo info1, HRegionInfo info2)
throws IOException {
  if (info1 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
        Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
  }
  if (info2 == null) {
    throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
        Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
  }
  HRegion merged = null;
  HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());
  try {
    HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf());
    try {
      merged = HRegion.merge(r1, r2);
    } finally {
      if (!r2.isClosed()) {
        r2.close();
      }
    }
  } finally {
    if (!r1.isClosed()) {
      r1.close();
    }
  }

  // Remove the old regions from meta.
  // HRegion.merge has already deleted their files

  removeRegionFromMeta(meta, info1);
  removeRegionFromMeta(meta, info2);

  this.mergeInfo = merged.getRegionInfo();
  return merged;
}
 
Author: fengchen8086, Project: ditb, Lines: 38, Source: Merge.java

Example 6: merge

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
protected boolean merge(final HRegionInfo[] info) throws IOException {
  if (info.length < 2) {
    LOG.info("only one region - nothing to merge");
    return false;
  }

  HRegion currentRegion = null;
  long currentSize = 0;
  HRegion nextRegion = null;
  long nextSize = 0;
  for (int i = 0; i < info.length - 1; i++) {
    if (currentRegion == null) {
      currentRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i], this.htd,
          walFactory.getWAL(info[i].getEncodedNameAsBytes()));
      currentSize = currentRegion.getLargestHStoreSize();
    }
    nextRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i + 1], this.htd,
        walFactory.getWAL(info[i+1].getEncodedNameAsBytes()));
    nextSize = nextRegion.getLargestHStoreSize();

    if ((currentSize + nextSize) <= (maxFilesize / 2)) {
      // We merge two adjacent regions if their total size is less than
      // one half of the desired maximum size
      LOG.info("Merging regions " + currentRegion.getRegionInfo().getRegionNameAsString() +
        " and " + nextRegion.getRegionInfo().getRegionNameAsString());
      HRegion mergedRegion =
        HRegion.mergeAdjacent(currentRegion, nextRegion);
      updateMeta(currentRegion.getRegionInfo().getRegionName(),
        nextRegion.getRegionInfo().getRegionName(), mergedRegion);
      break;
    }
    LOG.info("not merging regions " +
      Bytes.toStringBinary(currentRegion.getRegionInfo().getRegionName()) +
        " and " + Bytes.toStringBinary(nextRegion.getRegionInfo().getRegionName()));
    currentRegion.close();
    currentRegion = nextRegion;
    currentSize = nextSize;
  }
  if(currentRegion != null) {
    currentRegion.close();
  }
  return true;
}
 
Author: fengchen8086, Project: ditb, Lines: 44, Source: HMerge.java

Example 7: openRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * @return Instance of HRegion if successful open else null.
 */
HRegion openRegion() {
  HRegion region = null;
  try {
    // Instantiate the region.  This also periodically tickles OPENING
    // state so master doesn't timeout this region in transition.
    region = HRegion.openHRegion(this.regionInfo, this.htd,
      this.rsServices.getWAL(this.regionInfo),
      this.server.getConfiguration(),
      this.rsServices,
      new CancelableProgressable() {
        @Override
        public boolean progress() {
          if (useZKForAssignment) {
            // if tickle failed, we need to cancel opening region.
            return coordination.tickleOpening(ord, regionInfo,
              rsServices, "open_region_progress");
          }
          if (!isRegionStillOpening()) {
            LOG.warn("Open region aborted since it isn't opening any more");
            return false;
          }
          return true;
        }
      });
  } catch (Throwable t) {
    // We failed open. Our caller will see the 'null' return value
    // and transition the node back to FAILED_OPEN. If that fails,
    // we rely on the Timeout Monitor in the master to reassign.
    LOG.error(
        "Failed open of region=" + this.regionInfo.getRegionNameAsString()
            + ", starting to roll back the global memstore size.", t);
    // Decrease the global memstore size.
    if (this.rsServices != null) {
      RegionServerAccounting rsAccounting =
        this.rsServices.getRegionServerAccounting();
      if (rsAccounting != null) {
        rsAccounting.rollbackRegionReplayEditsSize(this.regionInfo.getRegionName());
      }
    }
  }
  return region;
}
 
Author: fengchen8086, Project: ditb, Lines: 46, Source: OpenRegionHandler.java

Example 8: openClosedRegion

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
protected HRegion openClosedRegion(final HRegion closedRegion)
throws IOException {
  return HRegion.openHRegion(closedRegion, null);
}
 
Author: fengchen8086, Project: ditb, Lines: 5, Source: HBaseTestCase.java

Example 9: testRegionShouldNotBeDeployed

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * The region is not deployed when the table is disabled.
 */
@Test (timeout=180000)
public void testRegionShouldNotBeDeployed() throws Exception {
  TableName table =
      TableName.valueOf("tableRegionShouldNotBeDeployed");
  try {
    LOG.info("Starting testRegionShouldNotBeDeployed.");
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    assertTrue(cluster.waitForActiveAndReadyMaster());


    byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
        Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
    HTableDescriptor htdDisabled = new HTableDescriptor(table);
    htdDisabled.addFamily(new HColumnDescriptor(FAM));

    // Write the .tableinfo
    FSTableDescriptors fstd = new FSTableDescriptors(conf);
    fstd.createTableDescriptor(htdDisabled);
    List<HRegionInfo> disabledRegions =
        TEST_UTIL.createMultiRegionsInMeta(conf, htdDisabled, SPLIT_KEYS);

    // Let's just assign everything to first RS
    HRegionServer hrs = cluster.getRegionServer(0);

    // Create region files.
    admin.disableTable(table);
    admin.enableTable(table);

    // Disable the table and close its regions
    admin.disableTable(table);
    HRegionInfo region = disabledRegions.remove(0);
    byte[] regionName = region.getRegionName();

    // The region should not be assigned currently
    assertTrue(cluster.getServerWith(regionName) == -1);

    // Directly open a region on a region server.
    // If going through AM/ZK, the region won't be open.
    // Even it is opened, AM will close it which causes
    // flakiness of this test.
    HRegion r = HRegion.openHRegion(
      region, htdDisabled, hrs.getWAL(region), conf);
    hrs.addToOnlineRegions(r);

    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.SHOULD_NOT_BE_DEPLOYED });

    // fix this fault
    doFsck(conf, true);

    // check result
    assertNoErrors(doFsck(conf, false));
  } finally {
    admin.enableTable(table);
    cleanupTable(table);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 61, Source: TestHBaseFsck.java

Example 10: testFlushSequenceIdIsGreaterThanAllEditsInHFile

import org.apache.hadoop.hbase.regionserver.HRegion; // import the package/class this method depends on
/**
 * Test flush for sure has a sequence id that is beyond the last edit appended.  We do this
 * by slowing appends in the background ring buffer thread while in foreground we call
 * flush.  The addition of the sync over HRegion in flush should fix an issue where flush was
 * returning before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
    TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        Threads.sleep(100);
        LOG.debug("Sleeping before appending 100ms");
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
    TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      puts =
        TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }

    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());

    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p: puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
      wal.append(htd, info, logkey, edits, true);
    }
    region.flush(true);
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId());
  } finally {
    region.close(true);
    wal.close();
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 82, Source: TestFSHLog.java


Note: The org.apache.hadoop.hbase.regionserver.HRegion.openHRegion examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce this article without permission.