

Java HTableDescriptor.getTableName Method Code Examples

This article collects typical code examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.getTableName. If you are wondering what HTableDescriptor.getTableName does, how to use it, or where to find usage examples, the hand-picked code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


Below are 15 code examples of the HTableDescriptor.getTableName method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
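To orient you before the examples: getTableName() returns the TableName the descriptor was constructed with. The following minimal sketch (the table name "demo_table" and column family "cf" are made up for illustration) shows the round trip:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class GetTableNameSketch {
  public static void main(String[] args) {
    // Build a descriptor for a hypothetical table with one column family.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    htd.addFamily(new HColumnDescriptor("cf"));

    // getTableName() hands back the TableName the descriptor was created with;
    // the examples below pass it to HRegionInfo constructors, admin calls, and logs.
    TableName tableName = htd.getTableName();
    System.out.println(tableName.getNameAsString());      // demo_table
    System.out.println(tableName.getNamespaceAsString()); // default
  }
}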

Example 1: loadTableInfosForTablesWithNoRegion

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/** Loads table infos for tables that may not have been included because there are no
 * regions reported for the table, even though the table directory exists in HDFS.
 */
private void loadTableInfosForTablesWithNoRegion() throws IOException {
  Map<String, HTableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
  for (HTableDescriptor htd : allTables.values()) {
    if (checkMetaOnly && !htd.isMetaTable()) {
      continue;
    }

    TableName tableName = htd.getTableName();
    if (isTableIncluded(tableName) && !tablesInfo.containsKey(tableName)) {
      TableInfo tableInfo = new TableInfo(tableName);
      tableInfo.htds.add(htd);
      tablesInfo.put(htd.getTableName(), tableInfo);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source: HBaseFsck.java

Example 2: handleHoleInRegionChain

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * There is a hole in the hdfs regions that violates the table integrity
 * rules.  Create a new empty region that patches the hole.
 */
@Override
public void handleHoleInRegionChain(byte[] holeStartKey, byte[] holeStopKey) throws IOException {
  errors.reportError(
      ERROR_CODE.HOLE_IN_REGION_CHAIN,
      "There is a hole in the region chain between "
          + Bytes.toStringBinary(holeStartKey) + " and "
          + Bytes.toStringBinary(holeStopKey)
          + ".  Creating a new regioninfo and region "
          + "dir in hdfs to plug the hole.");
  HTableDescriptor htd = getTableInfo().getHTD();
  HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
  HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
  LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
  fixes++;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: HBaseFsck.java

Example 3: getSplitPolicyClass

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
public static Class<? extends RegionSplitPolicy> getSplitPolicyClass(
    HTableDescriptor htd, Configuration conf) throws IOException {
  String className = htd.getRegionSplitPolicyClassName();
  if (className == null) {
    className = conf.get(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
        DEFAULT_SPLIT_POLICY_CLASS.getName());
  }

  try {
    Class<? extends RegionSplitPolicy> clazz =
      Class.forName(className).asSubclass(RegionSplitPolicy.class);
    return clazz;
  } catch (Exception e) {
    throw new IOException(
        "Unable to load configured region split policy '" +
        className + "' for table '" + htd.getTableName() + "'",
        e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: RegionSplitPolicy.java

Example 4: createTableAsyncV2

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Creates a new table but does not block and wait for it to come online.
 * You can use Future.get(long, TimeUnit) to wait on the operation to complete.
 * It may throw ExecutionException if there was an error while executing the operation
 * or TimeoutException in case the wait timeout was not long enough to allow the
 * operation to complete.
 *
 * @param desc table descriptor for table
 * @param splitKeys keys to check if the table has been created with all split keys
 * @throws IllegalArgumentException if the table name is bad, if the split keys
 *    are repeated, or if a split key is an empty byte array.
 * @throws IOException if a remote or network exception occurs
 * @return the result of the async creation. You can use Future.get(long, TimeUnit)
 *    to wait on the operation to complete.
 */
// TODO: This should be called Async but it will break binary compatibility
private Future<Void> createTableAsyncV2(final HTableDescriptor desc, final byte[][] splitKeys)
    throws IOException {
  if (desc.getTableName() == null) {
    throw new IllegalArgumentException("TableName cannot be null");
  }
  if (splitKeys != null && splitKeys.length > 0) {
    Arrays.sort(splitKeys, Bytes.BYTES_COMPARATOR);
    // Verify there are no duplicate split keys
    byte[] lastKey = null;
    for (byte[] splitKey : splitKeys) {
      if (Bytes.compareTo(splitKey, HConstants.EMPTY_BYTE_ARRAY) == 0) {
        throw new IllegalArgumentException(
            "Empty split key must not be passed in the split keys.");
      }
      if (lastKey != null && Bytes.equals(splitKey, lastKey)) {
        throw new IllegalArgumentException("All split keys must be unique, " +
          "found duplicate: " + Bytes.toStringBinary(splitKey) +
          ", " + Bytes.toStringBinary(lastKey));
      }
      lastKey = splitKey;
    }
  }

  CreateTableResponse response = executeCallable(
    new MasterCallable<CreateTableResponse>(getConnection()) {
      @Override
      public CreateTableResponse call(int callTimeout) throws ServiceException {
        PayloadCarryingRpcController controller = rpcControllerFactory.newController();
        controller.setCallTimeout(callTimeout);
        controller.setPriority(desc.getTableName());
        CreateTableRequest request = RequestConverter.buildCreateTableRequest(
          desc, splitKeys, ng.getNonceGroup(), ng.newNonce());
        return master.createTable(controller, request);
      }
    });
  return new CreateTableFuture(this, desc, splitKeys, response);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 54, Source: HBaseAdmin.java
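As the Javadoc above suggests, the returned Future can be waited on with a bounded timeout. A hypothetical sketch of that pattern (the method is private, so assume it is called from inside HBaseAdmin, with the usual java.util.concurrent imports; the 60-second timeout is made up for illustration):

// Hypothetical caller inside HBaseAdmin; desc and splitKeys as above.
Future<Void> future = createTableAsyncV2(desc, splitKeys);
try {
  future.get(60, TimeUnit.SECONDS);   // block until the creation completes
} catch (TimeoutException e) {
  // The wait ran out; the table creation may still finish in the background.
} catch (ExecutionException e) {
  // The creation itself failed; inspect e.getCause() for the root cause.
} catch (InterruptedException e) {
  Thread.currentThread().interrupt(); // restore the interrupt flag
}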

Example 5: initHRegion

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private void initHRegion (byte [] tableName, String callingMethod, int [] maxVersions,
  byte[] ... families)
throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  int i=0;
  for(byte [] family : families) {
    HColumnDescriptor hcd = new HColumnDescriptor(family);
    hcd.setMaxVersions(maxVersions != null ? maxVersions[i++] : 1);
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  region = TEST_UTIL.createLocalHRegion(info, htd);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source: TestAtomicOperation.java

Example 6: getIndexTableRelation

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
public static IndexTableRelation getIndexTableRelation(HTableDescriptor desc) throws IOException {
  byte[] bytes;
  if (desc != null && (bytes = desc.getValue(INDEX_ATTRIBUTE_NAME_BYTES)) != null) {
    ByteArrayInputStream inputStream = new ByteArrayInputStream(bytes);
    DataInputStream dis = new DataInputStream(inputStream);
    IndexTableRelation rl = new IndexTableRelation(desc.getTableName(), IndexType.NoIndex);
    rl.readFields(dis);
    return rl;
  }
  LOG.info("index table relation = null, because " + (desc == null ?
      "desc is null" :
      ("attribute is null for table " + desc.getTableName())));
  return null;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source: IndexTableRelation.java

Example 7: testSyncRunnerIndexOverflow

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test
public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldException,
    SecurityException, IllegalArgumentException, IllegalAccessException {
  final String name = "testSyncRunnerIndexOverflow";
  FSHLog log =
      new FSHLog(fs, FSUtils.getRootDir(conf), name, HConstants.HREGION_OLDLOGDIR_NAME, conf,
          null, true, null, null);
  try {
    Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
    ringBufferEventHandlerField.setAccessible(true);
    FSHLog.RingBufferEventHandler ringBufferEventHandler =
        (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log);
    Field syncRunnerIndexField =
        FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex");
    syncRunnerIndexField.setAccessible(true);
    syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1);
    HTableDescriptor htd =
        new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
    HRegionInfo hri =
        new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    for (int i = 0; i < 10; i++) {
      addEdits(log, hri, htd, 1, mvcc);
    }
  } finally {
    log.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source: TestFSHLog.java

Example 8: createRegion

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Create a new region in META.
 */
private HRegionInfo createRegion(final HTableDescriptor
    htd, byte[] startKey, byte[] endKey)
    throws IOException {
  Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService);
  HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: TestHBaseFsck.java

Example 9: createTestTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private Table createTestTable(String tableName) throws IOException {
  // Create the test table and open it
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
  admin.createTable(desc);
  return new HTable(TEST_UTIL.getConfiguration(), desc.getTableName());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 8, Source: TestLogRolling.java

Example 10: createTableAndFlush

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
private void createTableAndFlush(HTableDescriptor htd) throws Exception {
  HColumnDescriptor hcd = htd.getFamilies().iterator().next();
  // Create the test table
  TEST_UTIL.getHBaseAdmin().createTable(htd);
  TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
  // Create a store file
  Table table = new HTable(conf, htd.getTableName());
  try {
    table.put(new Put(Bytes.toBytes("testrow"))
      .add(hcd.getName(), Bytes.toBytes("q"), Bytes.toBytes("value")));
  } finally {
    table.close();
  }
  TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source: TestEncryptionKeyRotation.java

Example 11: setUp

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Override
public void setUp() throws Exception {
  // setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

  //Setting up a Store
  final String id = TestDefaultCompactSelection.class.getName();
  Path basedir = new Path(DIR);
  final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  wals = new WALFactory(walConf, null, id);
  region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
      null);

  store = new HStore(region, hcd, conf);

  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source: TestDefaultCompactSelection.java

Example 12: postCreateTableHandler

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Override
public void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
    HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
  // When AC is used, it should be configured as the 1st CP.
  // In Master, the table operations like create, are handled by a Thread pool but the max size
  // for this pool is 1. So if multiple CPs create tables on startup, these creations will happen
  // sequentially only.
  // Related code in HMaster#startServiceThreads
  // {code}
  //   // We depend on there being only one instance of this executor running
  //   // at a time. To do concurrency, would need fencing of enable/disable of
  //   // tables.
  //   this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
  // {code}
  // In future if we change this pool to have more threads, then there is a chance for thread,
  // creating acl table, getting delayed and by that time another table creation got over and
  // this hook is getting called. In such a case, we will need a wait logic here which will
  // wait till the acl table is created.
  if (AccessControlLists.isAclTable(desc)) {
    this.aclTabAvailable = true;
  } else if (!(TableName.NAMESPACE_TABLE_NAME.equals(desc.getTableName()))) {
    if (!aclTabAvailable) {
      LOG.warn("Not adding owner permission for table " + desc.getTableName() + ". "
          + AccessControlLists.ACL_TABLE_NAME + " is not yet created. "
          + getClass().getSimpleName() + " should be configured as the first Coprocessor");
    } else {
      String owner = desc.getOwnerString();
      // default the table owner to current user, if not specified.
      if (owner == null)
        owner = getActiveUser().getShortName();
      final UserPermission userperm = new UserPermission(Bytes.toBytes(owner),
          desc.getTableName(), null, Action.values());
      // switch to the real hbase master user for doing the RPC on the ACL table
      User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
          AccessControlLists.addUserPermission(c.getEnvironment().getConfiguration(),
              userperm);
          return null;
        }
      });
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 45, Source: AccessController.java

Example 13: testFilterListWithPrefixFilter

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
@Test
public void testFilterListWithPrefixFilter() throws IOException {
  byte[] family = Bytes.toBytes("f1");
  byte[] qualifier = Bytes.toBytes("q1");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestFilter"));
  htd.addFamily(new HColumnDescriptor(family));
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  HRegion testRegion = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
      TEST_UTIL.getConfiguration(), htd);

  for(int i=0; i<5; i++) {
    Put p = new Put(Bytes.toBytes((char)('a'+i) + "row"));
    p.setDurability(Durability.SKIP_WAL);
    p.add(family, qualifier, Bytes.toBytes(String.valueOf(111+i)));
    testRegion.put(p);
  }
  testRegion.flush(true);

  // rows starting with "b"
  PrefixFilter pf = new PrefixFilter(new byte[] {'b'});
  // rows with value of column 'q1' set to '113'
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(
      family, qualifier, CompareOp.EQUAL, Bytes.toBytes("113"));
  // combine these two with OR in a FilterList
  FilterList filterList = new FilterList(Operator.MUST_PASS_ONE, pf, scvf);

  Scan s1 = new Scan();
  s1.setFilter(filterList);
  InternalScanner scanner = testRegion.getScanner(s1);
  List<Cell> results = new ArrayList<Cell>();
  int resultCount = 0;
  while (scanner.next(results)) {
    resultCount++;
    byte[] row =  CellUtil.cloneRow(results.get(0));
    LOG.debug("Found row: " + Bytes.toStringBinary(row));
    assertTrue(Bytes.equals(row, Bytes.toBytes("brow"))
        || Bytes.equals(row, Bytes.toBytes("crow")));
    results.clear();
  }
  assertEquals(2, resultCount);
  scanner.close();

  WAL wal = ((HRegion)testRegion).getWAL();
  ((HRegion)testRegion).close();
  wal.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 47, Source: TestFilter.java

Example 14: testRegionInfoFileCreation

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
  /**
   * Verifies that the .regioninfo file is written on region creation and that
   * it is recreated if missing during region opening.
   */
  @Test
  public void testRegionInfoFileCreation() throws IOException {
    Path rootDir = new Path(dir + "testRegionInfoFileCreation");

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testtb"));
    htd.addFamily(new HColumnDescriptor("cf"));

    HRegionInfo hri = new HRegionInfo(htd.getTableName());

    // Create a region and skip the initialization (like CreateTableHandler)
    HRegion region = HRegion.createHRegion(hri, rootDir, CONF, htd, null, false, true);
//    HRegion region = TEST_UTIL.createLocalHRegion(hri, htd);
    Path regionDir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    HRegion.closeHRegion(region);

    Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);

    // Verify that the .regioninfo file is present
    assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
        fs.exists(regionInfoFile));

    // Try to open the region
    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
    HRegion.closeHRegion(region);

    // Verify that the .regioninfo file is still there
    assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
        fs.exists(regionInfoFile));

    // Remove the .regioninfo file and verify is recreated on region open
    fs.delete(regionInfoFile, true);
    assertFalse(HRegionFileSystem.REGION_INFO_FILE + " should be removed from the region dir",
        fs.exists(regionInfoFile));

    region = HRegion.openHRegion(rootDir, hri, htd, null, CONF);
//    region = TEST_UTIL.openHRegion(hri, htd);
    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
    HRegion.closeHRegion(region);

    // Verify that the .regioninfo file is still there
    assertTrue(HRegionFileSystem.REGION_INFO_FILE + " should be present in the region dir",
        fs.exists(new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE)));
  }
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 50, Source: TestHRegion.java

Example 15: testFailedFlushAborts

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class the method depends on
/**
 * Test that if we fail a flush, abort gets set on close.
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-4270">HBASE-4270</a>
 * @throws IOException
 * @throws NodeExistsException
 * @throws KeeperException
 */
@Test public void testFailedFlushAborts()
throws IOException, NodeExistsException, KeeperException {
  final Server server = new MockServer(HTU, false);
  final RegionServerServices rss = HTU.createMockRegionServerService();
  HTableDescriptor htd = TEST_HTD;
  final HRegionInfo hri =
    new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW,
      HConstants.EMPTY_END_ROW);
  HRegion region = HTU.createLocalHRegion(hri,  htd);
  try {
    assertNotNull(region);
    // Spy on the region so can throw exception when close is called.
    HRegion spy = Mockito.spy(region);
    final boolean abort = false;
    Mockito.when(spy.close(abort)).
    thenThrow(new IOException("Mocked failed close!"));
    // The CloseRegionHandler will try to get an HRegion that corresponds
    // to the passed hri -- so insert the region into the online region Set.
    rss.addToOnlineRegions(spy);
    // Assert the Server is NOT stopped before we call close region.
    assertFalse(server.isStopped());

    ZkCoordinatedStateManager consensusProvider = new ZkCoordinatedStateManager();
    consensusProvider.initialize(server);
    consensusProvider.start();

    ZkCloseRegionCoordination.ZkCloseRegionDetails zkCrd =
      new ZkCloseRegionCoordination.ZkCloseRegionDetails();
    zkCrd.setPublishStatusInZk(false);
    zkCrd.setExpectedVersion(-1);

    CloseRegionHandler handler = new CloseRegionHandler(server, rss, hri, false,
          consensusProvider.getCloseRegionCoordination(), zkCrd);
    boolean throwable = false;
    try {
      handler.process();
    } catch (Throwable t) {
      throwable = true;
    } finally {
      assertTrue(throwable);
      // Abort calls stop so stopped flag should be set.
      assertTrue(server.isStopped());
    }
  } finally {
    HRegion.closeHRegion(region);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 55, Source: TestCloseRegionHandler.java


Note: The org.apache.hadoop.hbase.HTableDescriptor.getTableName examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and the source code copyright belongs to the original authors. Please consult each project's License before distributing or reusing the code; do not reproduce this article without permission.