

Java Table Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Table. If you have been wondering what the Table class does, how to use it, or what real-world usage looks like, the curated examples below should help.


The Table class belongs to the org.apache.hadoop.hbase.client package. Fifteen code examples of the class are shown below, sorted by popularity by default; a minimal sketch of the basic Table lifecycle comes first, as orientation.
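
The sketch assumes a reachable HBase cluster configured on the classpath; the table name, column family, and values are illustrative and appear in none of the collected examples:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableBasics {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Table instances are lightweight and not thread-safe: obtain one from a
    // shared Connection per batch of operations and close it afterwards
    // (try-with-resources handles both here).
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1"));
      table.put(put);

      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println(Bytes.toString(
          result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    }
  }
}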

Example 1: runScanner

import org.apache.hadoop.hbase.client.Table; // import the required package/class
private void runScanner(Table hTable, int expectedSize, Filter... filters) throws IOException {
  String cf = "f";
  Scan scan = new Scan();
  scan.addFamily(cf.getBytes());
  FilterList filterList = new FilterList(filters);
  scan.setFilter(filterList);

  ResultScanner scanner = hTable.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  Result result;
  long timeBeforeScan = System.currentTimeMillis();
  while ((result = scanner.next()) != null) {
    for (Cell kv : result.listCells()) {
      LOG.info("Got rk: " + Bytes.toStringBinary(CellUtil.cloneRow(kv)) + " cq: "
              + Bytes.toStringBinary(CellUtil.cloneQualifier(kv)));
      results.add(kv);
    }
  }
  long scanTime = System.currentTimeMillis() - timeBeforeScan;
  scanner.close();

  LOG.info("scan time = " + scanTime + "ms");
  LOG.info("found " + results.size() + " results");

  assertEquals(expectedSize, results.size());
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestFuzzyRowAndColumnRangeFilter.java
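
As the source file name suggests, the test drives this helper with a FuzzyRowFilter combined with a ColumnRangeFilter. A hedged sketch of one such invocation, assuming org.apache.hadoop.hbase.filter.FuzzyRowFilter, org.apache.hadoop.hbase.filter.ColumnRangeFilter, org.apache.hadoop.hbase.util.Pair, and java.util.Arrays are imported; the mask bytes, qualifier bounds, and expected count are illustrative, not values from the original test:

// In the fuzzy mask, 0 means the corresponding template byte must match;
// 1 means any byte is accepted at that position.
Pair<byte[], byte[]> fuzzyKey = new Pair<byte[], byte[]>(
    new byte[] { 0, 0, 0, 1 },   // row key template
    new byte[] { 1, 1, 1, 0 });  // fuzzy mask
Filter fuzzy = new FuzzyRowFilter(Arrays.asList(fuzzyKey));
// Keep only qualifiers in ["cq0", "cq5"): lower bound inclusive, upper exclusive.
Filter columnRange = new ColumnRangeFilter(Bytes.toBytes("cq0"), true,
    Bytes.toBytes("cq5"), false);
runScanner(hTable, 10, fuzzy, columnRange);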

Example 2: deleteAndWait

import org.apache.hadoop.hbase.client.Table; // import the required package/class
private void deleteAndWait(byte[] row, Table source, Table... targets)
throws Exception {
  Delete del = new Delete(row);
  source.delete(del);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i==NB_RETRIES-1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestMultiSlaveReplication.java

Example 3: removeUserPermission

import org.apache.hadoop.hbase.client.Table; // import the required package/class
/**
 * Removes a previously granted permission from the stored access control
 * lists.  The {@link TablePermission} being removed must exactly match what
 * is stored -- no wildcard matching is attempted.  Ie, if user "bob" has
 * been granted "READ" access to the "data" table, but only to column family
 * plus qualifier "info:colA", then trying to call this method with only
 * user "bob" and the table name "data" (but without specifying the
 * column qualifier "info:colA") will have no effect.
 *
 * @param conf the configuration
 * @param userPerm the details of the permission to be revoked
 * @throws IOException if there is an error accessing the metadata table
 */
static void removeUserPermission(Configuration conf, UserPermission userPerm)
    throws IOException {
  Delete d = new Delete(userPermissionRowKey(userPerm));
  byte[] key = userPermissionKey(userPerm);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing permission "+ userPerm.toString());
  }
  d.addColumns(ACL_LIST_FAMILY, key);
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
      table.delete(d);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: AccessControlLists.java
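
The helper above is internal to AccessControlLists; application code usually drives the same exact-match revocation through the public AccessControlClient API. A hedged sketch using HBase 1.x signatures, assuming org.apache.hadoop.hbase.security.access.AccessControlClient and Permission are imported; the table, user, and column names are illustrative:

try (Connection connection = ConnectionFactory.createConnection(conf)) {
  // The revoke must name the same family and qualifier as the original grant;
  // revoking user "bob" at table scope alone would leave an "info:colA" grant intact.
  AccessControlClient.revoke(connection, TableName.valueOf("data"), "bob",
      Bytes.toBytes("info"), Bytes.toBytes("colA"), Permission.Action.READ);
} catch (Throwable t) {
  throw new IOException("revoke failed", t);
}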

Example 4: testIncrementHook

import org.apache.hadoop.hbase.client.Table; // import the required package/class
@Test (timeout=300000)
public void testIncrementHook() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testIncrementHook");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    Increment inc = new Increment(Bytes.toBytes(0));
    inc.addColumn(A, A, 1);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {false, false, false}
        );

    table.increment(inc);

    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreIncrement", "hadPostIncrement", "hadPreIncrementAfterRowLock"},
        tableName,
        new Boolean[] {true, true, true}
        );
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestRegionObserverInterface.java

Example 5: insertData

import org.apache.hadoop.hbase.client.Table; // import the required package/class
private static int insertData(TableName tableName, String column, double prob) throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));

  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
        + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestScannersWithLabels.java

Example 6: getMetaTableRows

import org.apache.hadoop.hbase.client.Table; // import the required package/class
/**
 * Returns all rows from the hbase:meta table for a given user table
 *
 * @throws IOException When reading the rows fails.
 */
public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
  // TODO: Redo using MetaTableAccessor.
  Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
  List<byte[]> rows = new ArrayList<byte[]>();
  ResultScanner s = t.getScanner(new Scan());
  for (Result result : s) {
    HRegionInfo info = HRegionInfo.getHRegionInfo(result);
    if (info == null) {
      LOG.error("No region info for row " + Bytes.toString(result.getRow()));
      // TODO figure out what to do for this new hosed case.
      continue;
    }

    if (info.getTable().equals(tableName)) {
      LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
      rows.add(result.getRow());
    }
  }
  s.close();
  t.close();
  return rows;
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: HBaseTestingUtility.java

Example 7: doCheckAndDelete

import org.apache.hadoop.hbase.client.Table; // import the required package/class
boolean doCheckAndDelete(final byte[] row, final byte[] family, final byte[] qualifier, final byte[] value,
        final Delete delete) {
    boolean result;
    final Table tableInterface = getTable();
    try {
        result = doCheckAndDelete(tableInterface, row, family, qualifier, value, delete);
    } finally {
        closeTable(tableInterface);
    }
    return result;
}
 
Developer: gchq, Project: stroom-stats, Lines: 12, Source: HBaseTable.java
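
The two-argument overload delegated to above is not shown in the excerpt. A plausible minimal body, assuming it simply wraps HBase's atomic checkAndDelete; this is a sketch, not the stroom-stats implementation:

private boolean doCheckAndDelete(final Table table, final byte[] row, final byte[] family,
        final byte[] qualifier, final byte[] value, final Delete delete) {
    try {
        // Atomically deletes only if the current value at row/family:qualifier equals 'value'.
        return table.checkAndDelete(row, family, qualifier, value, delete);
    } catch (final IOException e) {
        throw new RuntimeException("checkAndDelete failed for row "
                + Bytes.toStringBinary(row), e);
    }
}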

Example 8: testNoEdits

import org.apache.hadoop.hbase.client.Table; // import the required package/class
/**
 * Tests that the LogRoller perform the roll even if there are no edits
 */
@Test
public void testNoEdits() throws Exception {
  TableName tableName = TableName.valueOf("TestLogRollPeriodNoEdits");
  TEST_UTIL.createTable(tableName, "cf");
  try {
    Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    try {
      HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
      WAL log = server.getWAL(null);
      checkMinLogRolls(log, 5);
    } finally {
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestLogRollPeriod.java

Example 9: blockUntilRegionIsOpened

import org.apache.hadoop.hbase.client.Table; // import the required package/class
public static void blockUntilRegionIsOpened(Configuration conf, long timeout, HRegionInfo hri)
    throws IOException, InterruptedException {
  log("blocking until region is opened for reading:" + hri.getRegionNameAsString());
  long start = System.currentTimeMillis();
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(hri.getTable())) {
    byte[] row = hri.getStartKey();
    // Check for null/empty row. If we find one, use a key that is likely to be in first region.
    if (row == null || row.length <= 0) row = new byte[] { '0' };
    Get get = new Get(row);
    while (System.currentTimeMillis() - start < timeout) {
      try {
        table.get(get);
        break;
      } catch (IOException ex) {
        // wait some more
      }
      Threads.sleep(10);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: TestEndToEndSplitTransaction.java

Example 10: assertMetaLocation

import org.apache.hadoop.hbase.client.Table; // import the required package/class
public static void assertMetaLocation(Table meta, byte[] row, ServerName serverName,
    long seqNum, int replicaId, boolean checkSeqNum) throws IOException {
  Get get = new Get(row);
  Result result = meta.get(get);
  assertTrue(Bytes.equals(
    result.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getServerColumn(replicaId)),
    Bytes.toBytes(serverName.getHostAndPort())));
  assertTrue(Bytes.equals(
    result.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getStartCodeColumn(replicaId)),
    Bytes.toBytes(serverName.getStartcode())));
  if (checkSeqNum) {
    assertTrue(Bytes.equals(
      result.getValue(HConstants.CATALOG_FAMILY, MetaTableAccessor.getSeqNumColumn(replicaId)),
      Bytes.toBytes(seqNum)));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestMetaTableAccessor.java

Example 11: startAndWriteData

import org.apache.hadoop.hbase.client.Table; // import the required package/class
private void startAndWriteData() throws IOException, InterruptedException {
  // When the hbase:meta table can be opened, the region servers are running
  new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  this.server = cluster.getRegionServerThreads().get(0).getRegionServer();

  Table table = createTestTable(this.tableName);

  server = TEST_UTIL.getRSForFirstRegionInTable(table.getName());
  for (int i = 1; i <= 256; i++) {    // 256 writes should cause 8 log rolls
    doPut(table, i);
    if (i % 32 == 0) {
      // After every 32 writes sleep to let the log roller run
      try {
        Thread.sleep(2000);
      } catch (InterruptedException e) {
        // continue
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestLogRolling.java
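
The doPut helper is not included in the excerpt; a plausible minimal version that writes one cell per call, so 256 calls produce steady WAL traffic (the column family, qualifier, and value are assumptions):

private void doPut(Table table, int i) throws IOException {
  Put put = new Put(Bytes.toBytes("row" + i));
  put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value" + i));
  table.put(put);
}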

Example 12: testVisibilityLabelsOnWALReplay

import org.apache.hadoop.hbase.client.Table; // import the required package/class
@Test(timeout = 60 * 1000)
public void testVisibilityLabelsOnWALReplay() throws Exception {
  final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE);) {
    List<RegionServerThread> regionServerThreads = TEST_UTIL.getHBaseCluster()
        .getRegionServerThreads();
    for (RegionServerThread rsThread : regionServerThreads) {
      rsThread.getRegionServer().abort("Aborting ");
    }
    // Start one new RS
    RegionServerThread rs = TEST_UTIL.getHBaseCluster().startRegionServer();
    waitForLabelsRegionAvailability(rs.getRegionServer());
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations(SECRET));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 1);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: TestVisibilityLabelsWithDefaultVisLabelService.java

Example 13: familyFilter

import org.apache.hadoop.hbase.client.Table; // import the required package/class
/**
 * Column family filter.
 *
 * @param tableName the table name
 * @param rowFamily the column family (used below as a binary prefix)
 * @param count     the number of results to fetch
 */
public void familyFilter(String tableName, String rowFamily, int count) {
    // Note: HBaseConfiguration here is the mumu-hbase project's own wrapper,
    // not org.apache.hadoop.hbase.HBaseConfiguration.
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Apply a family filter; the commented-out lines show alternative comparators.
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowFamily))));// exact binary comparison
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));// substring match
    scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator(rowFamily.getBytes())));// binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java
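
A hypothetical invocation (HBaseFilterOperation is the enclosing class from the mumu-hbase project; the table name and arguments are illustrative):

HBaseFilterOperation filterOperation = new HBaseFilterOperation();
// Print at most 10 rows from column families whose name has the binary prefix "mm".
filterOperation.familyFilter("user_table", "mm", 10);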

Example 14: qualifierFilter

import org.apache.hadoop.hbase.client.Table; // import the required package/class
/**
 * Column qualifier filter.
 *
 * @param tableName  the table name
 * @param columnName the column qualifier
 * @param count      the number of results to fetch
 */
public void qualifierFilter(String tableName, String columnName, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Apply a qualifier filter; the commented-out lines show alternative comparators.
    scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnName))));// exact binary comparison
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));// substring match
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("m".getBytes())));// binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 15: increment

import org.apache.hadoop.hbase.client.Table; // import the required package/class
@Override
public void increment(TIncrement tincrement) throws IOError, TException {

  if (tincrement.getRow().length == 0 || tincrement.getTable().length == 0) {
    throw new TException("Must supply a table and a row key; can't increment");
  }

  if (conf.getBoolean(COALESCE_INC_KEY, false)) {
    this.coalescer.queueIncrement(tincrement);
    return;
  }

  Table table = null;
  try {
    table = getTable(tincrement.getTable());
    Increment inc = ThriftUtilities.incrementFromThrift(tincrement);
    table.increment(inc);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: ThriftServerRunner.java
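
The COALESCE_INC_KEY branch is toggled by a Thrift server configuration flag; it is believed to correspond to the setting below, but verify the constant in your HBase version's ThriftServerRunner:

// Assumed key for COALESCE_INC_KEY: when true, increments are queued on an
// IncrementCoalescer and batched, instead of being applied on every Thrift call.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.regionserver.thrift.coalesceIncrement", true);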


Note: The org.apache.hadoop.hbase.client.Table class examples in this article were compiled by 纯净天空 (vimsky) from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from community-contributed open-source projects, and copyright in the source code remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.