

Java Table.getScanner Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Table.getScanner. If you are wondering what exactly Table.getScanner does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Table.


The following presents 15 code examples of Table.getScanner, ordered by popularity by default.
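Before looking at the collected examples, here is a minimal sketch of the basic Table.getScanner call pattern. It is not taken from any of the projects below; the table name "demo_table" and column family "cf" are placeholders, and an HBase cluster reachable through the default client configuration is assumed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetScannerSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {  // placeholder table name
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf"));  // placeholder column family
      scan.setCaching(100);                 // fetch up to 100 rows per RPC
      // getScanner opens a ResultScanner over all rows selected by the Scan
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}

The examples that follow show the same call embedded in larger, real-world scenarios: tag verification, reverse scans, filters, meta-table scans, and Thrift handlers.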

Example 1: verifyTags

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * This verifies that each cell has a tag that is equal to its rowkey name.  For this to work
 * the hbase instance must have HConstants.RPC_CODEC_CONF_KEY set to
 * KeyValueCodecWithTags.class.getCanonicalName());
 * @param table table containing tagged cells
 * @throws IOException if problems reading table
 */
public static void verifyTags(Table table) throws IOException {
  ResultScanner s = table.getScanner(new Scan());
  for (Result r : s) {
    for (Cell c : r.listCells()) {
      byte[] ta = c.getTagsArray();
      int toff = c.getTagsOffset();
      int tlen = c.getTagsLength();
      Tag t = Tag.getTag(ta, toff, tlen, TagType.ACL_TAG_TYPE);
      if (t == null) {
        fail(c.toString() + " has null tag");
        continue;
      }
      byte[] tval = t.getValue();
      assertArrayEquals(c.toString() + " has tag " + Bytes.toString(tval),
          r.getRow(), tval);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source file: HFileTestUtil.java

Example 2: getRowOrBefore

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
public Result getRowOrBefore(Table table, byte[] row, byte[] family) throws IOException {
  long start = System.currentTimeMillis();
  Scan scan = new Scan();
  scan.addFamily(family);
  scan.setReversed(true);
  scan.setStartRow(row);
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  ResultScanner scanner = table.getScanner(scan);
  Result ret = scanner.next();
  scanner.close();
  prevRowTotalTime += System.currentTimeMillis() - start;
  prevRowTotalCount++;
  return ret;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source file: MDIndex.java

Example 3: assertExpectedTable

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Checks that all columns have the expected value and that there is the
 * expected number of rows.
 * @throws IOException
 */
void assertExpectedTable(final Connection connection, TableName table, int count, int value)
throws IOException {
  HTableDescriptor [] htds = util.getHBaseAdmin().listTables(table.getNameAsString());
  assertEquals(htds.length, 1);
  Table t = null;
  try {
    t = connection.getTable(table);
    Scan s = new Scan();
    ResultScanner sr = t.getScanner(s);
    int i = 0;
    for (Result r : sr) {
      i++;
      for (NavigableMap<byte[], byte[]> nm : r.getNoVersionMap().values()) {
        for (byte[] val : nm.values()) {
          assertTrue(Bytes.equals(val, value(value)));
        }
      }
    }
    assertEquals(count, i);
  } catch (IOException e) {
    fail("Failed due to exception");
  } finally {
    if (t != null) t.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 31, Source file: TestLoadIncrementalHFilesSplitRecovery.java

Example 4: familyFilter

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Column family filter.
 *
 * @param tableName table name
 * @param rowFamily column family
 * @param count     number of rows to fetch
 */
public void familyFilter(String tableName, String rowFamily, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // use a FamilyFilter on the column family
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowFamily))));// exact binary comparison
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));// substring match
    scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("mm".getBytes())));// binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines of code: 27, Source file: HBaseFilterOperation.java

Example 5: qualifierFilter

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Column qualifier filter.
 *
 * @param tableName  table name
 * @param columnName column qualifier
 * @param count      number of rows to fetch
 */
public void qualifierFilter(String tableName, String columnName, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // use a QualifierFilter on the column qualifier
    scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnName))));// exact binary comparison
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));// substring match
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("m".getBytes())));// binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines of code: 27, Source file: HBaseFilterOperation.java

Example 6: valueFilter

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Cell value filter.
 *
 * @param tableName   table name
 * @param columnValue column value
 * @param count       number of rows to fetch
 */
public void valueFilter(String tableName, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // use a ValueFilter on the cell value
    scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnValue))));// exact binary comparison
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));// substring match
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("mm".getBytes())));// binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines of code: 27, Source file: HBaseFilterOperation.java

Example 7: runScanner

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
private void runScanner(Table table, boolean slow) throws Exception {
  long time = System.nanoTime();
  Scan scan = new Scan();
  scan.addColumn(cf_essential, col_name);
  scan.addColumn(cf_joined, col_name);

  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      cf_essential, col_name, CompareFilter.CompareOp.EQUAL, flag_yes);
  filter.setFilterIfMissing(true);
  scan.setFilter(filter);
  scan.setLoadColumnFamiliesOnDemand(!slow);

  ResultScanner result_scanner = table.getScanner(scan);
  Result res;
  long rows_count = 0;
  while ((res = result_scanner.next()) != null) {
    rows_count++;
  }

  double timeSec = (System.nanoTime() - time) / 1000000000.0;
  result_scanner.close();
  LOG.info((slow ? "Slow" : "Joined") + " scanner finished in " + Double.toString(timeSec)
    + " seconds, got " + Long.toString(rows_count/2) + " rows");
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 25, Source file: TestJoinedScanners.java

Example 8: getMetaTableRows

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Returns all rows from the hbase:meta table for a given user table
 *
 * @throws IOException When reading the rows fails.
 */
public List<byte[]> getMetaTableRows(TableName tableName) throws IOException {
  // TODO: Redo using MetaTableAccessor.
  Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
  List<byte[]> rows = new ArrayList<byte[]>();
  ResultScanner s = t.getScanner(new Scan());
  for (Result result : s) {
    HRegionInfo info = HRegionInfo.getHRegionInfo(result);
    if (info == null) {
      LOG.error("No region info for row " + Bytes.toString(result.getRow()));
      // TODO figure out what to do for this new hosed case.
      continue;
    }

    if (info.getTable().equals(tableName)) {
      LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(result.getRow()) + info);
      rows.add(result.getRow());
    }
  }
  s.close();
  t.close();
  return rows;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source file: HBaseTestingUtility.java

Example 9: getScannerResults

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
@Override
public List<TResult> getScannerResults(ByteBuffer table, TScan scan, int numRows)
    throws TIOError, TException {
  Table htable = getTable(table);
  List<TResult> results = null;
  ResultScanner scanner = null;
  try {
    scanner = htable.getScanner(scanFromThrift(scan));
    results = resultsFromHBase(scanner.next(numRows));
  } catch (IOException e) {
    throw getTIOError(e);
  } finally {
    if (scanner != null) {
      scanner.close();
    }
    closeTable(htable);
  }
  return results;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source file: ThriftHBaseServiceHandler.java

Example 10: getScanner

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Gets a scanner object and handles any exception
 */
public static ResultScanner getScanner(final Table tableInterface, final Scan scan) {
    try {
        return tableInterface.getScanner(scan);
    } catch (final Exception e) {
        closeTable(tableInterface);
        throw new HBaseException(e.getMessage(), e);
    }
}
 
Developer ID: gchq, Project: stroom-stats, Lines of code: 12, Source file: HBaseTable.java
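A hypothetical caller of this helper might look like the sketch below. The enclosing class name HBaseTable and the already-opened Table instance are assumptions based on the source file listed above, not code taken from the stroom-stats project itself.

Scan scan = new Scan();
scan.setCaching(50); // fetch up to 50 rows per RPC
// the helper wraps table.getScanner(scan) and rethrows failures as HBaseException
ResultScanner scanner = HBaseTable.getScanner(table, scan);
try {
    for (Result result : scanner) {
        // process each row here
    }
} finally {
    scanner.close();
}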

Example 11: SingleScanner

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
public SingleScanner(Scan scan, byte[][] resultColumns, Table table) throws IOException {
  this.scan = scan;
  this.resultColumns = resultColumns;
  this.indexTable = table;
  this.currentStartKey = scan.getStartRow();
  this.scanning = true;
  this.finished = false;
  scan.setCacheBlocks(false);
  this.resultScanner = table.getScanner(scan);
  LOG.debug("scan caching:" + scan.getCaching());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 12, Source file: IndexResultScanner.java

Example 12: checksumRows

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Return an md5 digest of the entire contents of a table.
 */
public String checksumRows(final Table table) throws Exception {
  Scan scan = new Scan();
  ResultScanner results = table.getScanner(scan);
  MessageDigest digest = MessageDigest.getInstance("MD5");
  for (Result res : results) {
    digest.update(res.getRow());
  }
  results.close();
  return digest.toString();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source file: HBaseTestingUtility.java

Example 13: testReversedPartialResultWhenRegionMove

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
@Test
public void testReversedPartialResultWhenRegionMove() throws IOException {
  Table table = createTestTable(TableName.valueOf("testReversedPartialResultWhenRegionMove"),
      ROWS, FAMILIES, QUALIFIERS, VALUE);

  moveRegion(table, 1);

  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  scan.setReversed(true);
  ResultScanner scanner = table.getScanner(scan);
  for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS-1; i++) {
    scanner.next();
  }
  Result result1 = scanner.next();
  assertEquals(1, result1.rawCells().length);
  Cell c1 = result1.rawCells()[0];
  assertCell(c1, ROWS[NUM_ROWS-1], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]);
  assertFalse(result1.isPartial());

  moveRegion(table, 2);

  Result result2 = scanner.next();
  assertEquals(1, result2.rawCells().length);
  Cell c2 = result2.rawCells()[0];
  assertCell(c2, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[0]);
  assertTrue(result2.isPartial());

  moveRegion(table, 3);

  Result result3 = scanner.next();
  assertEquals(1, result3.rawCells().length);
  Cell c3 = result3.rawCells()[0];
  assertCell(c3, ROWS[NUM_ROWS-2], FAMILIES[0], QUALIFIERS[1]);
  assertTrue(result3.isPartial());

}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source file: TestPartialResultsFromClientSide.java

Example 14: restartHBaseCluster

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
/**
 * Starts the hbase cluster up again after shutting it down previously in a
 * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
 * @param servers number of region servers
 * @throws IOException
 */
public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
  this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
  // Don't leave here till we've done a successful scan of the hbase:meta
  Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
  ResultScanner s = t.getScanner(new Scan());
  while (s.next() != null) {
    // do nothing
  }
  LOG.info("HBase has been restarted");
  s.close();
  t.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source file: HBaseTestingUtility.java

Example 15: countRows

import org.apache.hadoop.hbase.client.Table; // import the package/class this method depends on
public int countRows(final Table table, final byte[]... families) throws IOException {
  Scan scan = new Scan();
  for (byte[] family: families) {
    scan.addFamily(family);
  }
  ResultScanner results = table.getScanner(scan);
  int count = 0;
  for (@SuppressWarnings("unused") Result res : results) {
    count++;
  }
  results.close();
  return count;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source file: HBaseTestingUtility.java


Note: The org.apache.hadoop.hbase.client.Table.getScanner examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please follow the corresponding project licenses when distributing or using the code, and do not reproduce this article without permission.