當前位置: 首頁>>代碼示例>>Java>>正文


Java ResultScanner類代碼示例

本文整理匯總了Java中org.apache.hadoop.hbase.client.ResultScanner的典型用法代碼示例。如果您正苦於以下問題:Java ResultScanner類的具體用法?Java ResultScanner怎麽用?Java ResultScanner使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。


ResultScanner類屬於org.apache.hadoop.hbase.client包,在下文中一共展示了ResultScanner類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。

示例1: verify

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Scans FAMILY_NAME:COLUMN_NAME (latest version only) and asserts that each
 * cell's boolean value matches the expected flag recorded in TIMESTAMP for
 * that cell's timestamp.
 *
 * <p>Bug fixed: the scanner leaked when an assertion failed mid-iteration;
 * it is now released via try-with-resources.
 *
 * @param table the table to scan
 * @throws IOException if the scan fails
 */
private void verify(final Table table) throws IOException {
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, COLUMN_NAME);
  scan.setMaxVersions(1);
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      for (Cell kv : r.listCells()) {
        log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv))
            + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv))
            + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv)));
        org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()),
          (Boolean) Bytes.toBoolean(CellUtil.cloneValue(kv)));
      }
    }
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:17,代碼來源:TestTimeRangeMapRed.java

示例2: QueryAll

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Prints every row of the given table: the row key, then each cell's family
 * bytes and value.
 *
 * <p>Bug fixed: the ResultScanner was never closed and the table leaked if
 * an exception occurred before {@code table.close()}; both are now released
 * deterministically.
 *
 * @param tableName name of the table to dump
 */
public static void QueryAll(String tableName) {
    try {
        HTableInterface table = conn.getTable(tableName);
        try (ResultScanner rs = table.getScanner(new Scan())) {
            for (Result r : rs) {
                System.out.println("rowkey:" + new String(r.getRow()));
                for (KeyValue keyValue : r.raw()) {
                    System.out.println("column:" + new String(keyValue.getFamily())
                            + "====value:" + new String(keyValue.getValue()));
                }
            }
        } finally {
            table.close();
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
開發者ID:yjp123456,項目名稱:SparkDemo,代碼行數:18,代碼來源:MyClass.java

示例3: QueryByCondition2

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Queries rows where column1 equals "aaa" (via SingleColumnValueFilter) and
 * prints the row key and every cell of each matching row.
 *
 * <p>Bug fixed: neither the scanner nor the table was ever closed, leaking
 * client and server-side resources on every call.
 *
 * @param tableName name of the table to query
 */
public static void QueryByCondition2(String tableName) {

        try {
            HTablePool pool = new HTablePool(configuration, 1000);
            HTable table = (HTable) pool.getTable(tableName);
            // Match rows whose column1 value equals "aaa".
            Filter filter = new SingleColumnValueFilter(Bytes
                    .toBytes("column1"), null, CompareOp.EQUAL, Bytes
                    .toBytes("aaa"));
            Scan s = new Scan();
            s.setFilter(filter);
            try (ResultScanner rs = table.getScanner(s)) {
                for (Result r : rs) {
                    System.out.println("獲得到rowkey:" + new String(r.getRow()));
                    for (KeyValue keyValue : r.raw()) {
                        System.out.println("列:" + new String(keyValue.getFamily())
                                + "====值:" + new String(keyValue.getValue()));
                    }
                }
            } finally {
                table.close();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }

    }
 
開發者ID:yjp123456,項目名稱:SparkDemo,代碼行數:24,代碼來源:MyClass.java

示例4: fullScan

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Performs a full scan of a catalog table, invoking the visitor against each
 * non-empty row until the visitor returns false or the table is exhausted.
 *
 * @param connection connection we're using
 * @param visitor visitor invoked against each row; returning false stops the scan
 * @param startrow where to start the scan, or null to begin at the first row
 * @throws IOException if the scan fails
 */
public static void fullScan(Connection connection,
  final Visitor visitor, final byte [] startrow)
throws IOException {
  Scan scan = new Scan();
  if (startrow != null) {
    scan.setStartRow(startrow);
  } else {
    // Whole-table scan: raise caching to reduce round trips to the server.
    int caching = connection.getConfiguration()
        .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
    scan.setCaching(caching);
  }
  scan.addFamily(HConstants.CATALOG_FAMILY);
  Table metaTable = getMetaHTable(connection);
  try {
    ResultScanner scanner = metaTable.getScanner(scan);
    try {
      for (Result data = scanner.next(); data != null; data = scanner.next()) {
        if (data.isEmpty()) {
          continue;
        }
        if (!visitor.visit(data)) {
          break; // visitor asked to stop
        }
      }
    } finally {
      scanner.close();
    }
  } finally {
    metaTable.close();
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:36,代碼來源:MetaTableAccessor.java

示例5: waitUntilAllRegionsAssigned

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Blocks until every region has a server recorded in the meta table,
 * re-scanning once per second.
 *
 * <p>Bug fixed: the meta table handle and the per-iteration scanner leaked
 * if an exception escaped mid-scan; both are now closed via
 * try-with-resources.
 *
 * @throws IOException if reading the meta table fails
 */
private static void waitUntilAllRegionsAssigned()
throws IOException {
  try (HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME)) {
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      try (ResultScanner s = meta.getScanner(scan)) {
        for (Result r = null; (r = s.next()) != null;) {
          byte [] b =
            r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
          if (b == null || b.length <= 0) {
            // A region without a server yet: stop counting this pass.
            break;
          }
          rows++;
        }
      }
      // If I get to here and all rows have a Server, then all have been assigned.
      if (rows >= countOfRegions) {
        break;
      }
      LOG.info("Found=" + rows);
      Threads.sleep(1000);
    }
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:27,代碼來源:TestZKBasedOpenCloseRegion.java

示例6: testEquivalenceOfScanResults

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Asserts that two scans over the same table yield identical result
 * sequences: pairwise-equal results and the same total count.
 *
 * <p>Bug fixed: both scanners leaked when an assertion failed; they are now
 * closed in finally blocks.
 *
 * @param table table to scan
 * @param scan1 first scan
 * @param scan2 second scan, expected to produce identical results
 * @throws Exception on scan failure or assertion mismatch
 */
public void testEquivalenceOfScanResults(Table table, Scan scan1, Scan scan2) throws Exception {
  ResultScanner scanner1 = table.getScanner(scan1);
  try {
    ResultScanner scanner2 = table.getScanner(scan2);
    try {
      Result r1 = null;
      Result r2 = null;
      int count = 0;

      while ((r1 = scanner1.next()) != null) {
        r2 = scanner2.next();

        assertTrue(r2 != null);
        compareResults(r1, r2, "Comparing result #" + count);
        count++;
      }

      // scanner2 must also be exhausted, otherwise the scans differ in length.
      r2 = scanner2.next();
      assertTrue("r2: " + r2 + " Should be null", r2 == null);
    } finally {
      scanner2.close();
    }
  } finally {
    scanner1.close();
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:23,代碼來源:TestPartialResultsFromClientSide.java

示例7: familyFilter

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Family-filter demo: scans for column families >= the binary prefix "mm"
 * and prints up to {@code count} results.
 *
 * <p>Bug fixed: the scanner was never closed; it is now released via
 * try-with-resources.
 *
 * @param tableName table name
 * @param rowFamily column family — NOTE(review): unused by the active
 *                  filter; only the commented-out binary variant uses it
 * @param count     maximum number of results to fetch
 */
public void familyFilter(String tableName, String rowFamily, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // FamilyFilter variants: exact binary, regex, substring; the active
    // line matches families with byte prefix "mm".
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowFamily))));// binary comparison
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));// substring match
    scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("mm".getBytes())));// byte-prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
開發者ID:mumuhadoop,項目名稱:mumu-hbase,代碼行數:27,代碼來源:HBaseFilterOperation.java

示例8: qualifierFilter

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Qualifier-filter demo: scans for cells whose column qualifier equals
 * {@code columnName} and prints up to {@code count} results.
 *
 * <p>Bug fixed: the scanner was never closed; it is now released via
 * try-with-resources.
 *
 * @param tableName  table name
 * @param columnName column qualifier to match exactly
 * @param count      maximum number of results to fetch
 */
public void qualifierFilter(String tableName, String columnName, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Exact binary match on the qualifier; regex/substring/prefix variants below.
    scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnName))));// binary comparison
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));// substring match
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("m".getBytes())));// byte-prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
開發者ID:mumuhadoop,項目名稱:mumu-hbase,代碼行數:27,代碼來源:HBaseFilterOperation.java

示例9: dependentColumnFilter

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Demo intended to show a dependent-column filter (fetch columns sharing a
 * timestamp with a reference column).
 *
 * <p>NOTE(review): the method name and original doc describe a
 * DependentColumnFilter, but the body installs a PrefixFilter with an empty
 * prefix (matches every row) and ignores the columnFamily / qualifier /
 * columnValue parameters. Kept as-is to preserve behavior — confirm intent.
 *
 * <p>Bug fixed: the scanner was never closed; it is now released via
 * try-with-resources.
 *
 * @param tableName    table name
 * @param columnFamily reference column family (currently unused)
 * @param qualifier    reference column qualifier (currently unused)
 * @param columnValue  reference column value (currently unused)
 * @param count        maximum number of results to fetch
 */
public void dependentColumnFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("")));
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
開發者ID:mumuhadoop,項目名稱:mumu-hbase,代碼行數:25,代碼來源:HBaseFilterOperation.java

示例10: SingleColumnValueExcludeFilter

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Single-column-value-exclude filter demo: returns rows whose reference
 * column equals {@code columnValue}, with the reference column itself
 * excluded from the returned cells.
 *
 * <p>Bug fixed: the scanner was never closed; it is now released via
 * try-with-resources.
 *
 * @param tableName    table name
 * @param columnFamily family of the reference column
 * @param qualifier    qualifier of the reference column
 * @param columnValue  value the reference column must equal
 * @param count        maximum number of results to fetch
 */
public void SingleColumnValueExcludeFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    SingleColumnValueExcludeFilter singleColumnValueFilter = new SingleColumnValueExcludeFilter(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier), CompareFilter.CompareOp.EQUAL, Bytes.toBytes(columnValue));
    //singleColumnValueFilter.setFilterIfMissing(true);// by default, rows missing the reference column are NOT filtered out
    singleColumnValueFilter.setLatestVersionOnly(true);// test only the newest version of the reference column
    scan.setFilter(singleColumnValueFilter);
    scan.setCaching(10);
    //scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
開發者ID:mumuhadoop,項目名稱:mumu-hbase,代碼行數:28,代碼來源:HBaseFilterOperation.java

示例11: testNoPartialResultsWhenRowFilterPresent

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is
 * true, the scanner should not return partial results. The scanner cannot return partial results
 * because the entire row needs to be read for the include/exclude decision to be made.
 *
 * <p>Bug fixed: the scanner leaked if {@code assertFalse} threw; it is now
 * closed via try-with-resources.
 */
@Test
public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  // If a filter hasFilter() is true then partial results should not be returned else filter
  // application server side would break.
  scan.setFilter(new RandomRowFilter(1.0f));
  try (ResultScanner scanner = TABLE.getScanner(scan)) {
    Result r = null;
    while ((r = scanner.next()) != null) {
      assertFalse(r.isPartial());
    }
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:23,代碼來源:TestPartialResultsFromClientSide.java

示例12: copyTable

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Copies a single column (ColumnFamily:ColumnName, latest version only)
 * from the current table ({@code hbase_table}) into a newly created table.
 *
 * <p>Bug fixed: the scanner and the destination table leaked if a put
 * threw; both are now closed in finally blocks.
 *
 * @param oldTableName source table name — NOTE(review): unused; data is
 *                     read from the {@code hbase_table} field
 * @param newTableName destination table to create and fill
 * @param ColumnFamily column family to copy
 * @param ColumnName   column qualifier to copy
 * @throws IOException if scanning or writing fails
 */
public static void copyTable(String oldTableName, String newTableName,String ColumnFamily, String ColumnName)throws IOException {
	if(CreateNewTable(newTableName))
		logger.info("創建表"+newTableName+"表成功");
	else{
		logger.info("創建表"+newTableName+"表失敗");
	}
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false);
	ResultScanner rs = hbase_table.getScanner(s);
	try {
		HTableInterface hbase_table_new = conn.getTable(newTableName);
		try {
			for (Result r : rs) {
				byte[] key = r.getRow();
				byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
				Put put = new Put(key);
				put.add(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName), value);
				hbase_table_new.put(put);
			}
		} finally {
			hbase_table_new.close();
		}
	} finally {
		rs.close();
	}
}
 
開發者ID:ItGql,項目名稱:SparkIsax,代碼行數:29,代碼來源:HBaseUtils.java

示例13: testHBASE14489

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Regression test for HBASE-14489: draining a scanner whose filter removes
 * every row must still invoke the region observer's filterRow hook.
 */
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
  TableName tableName = TableName.valueOf("testHBASE14489");
  HTable table = util.createTable(tableName, new byte[][] { A });
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Scan s = new Scan();
  s.setFilter(new FilterAllFilter());
  try (ResultScanner scanner = table.getScanner(s)) {
    // Drain the scanner; FilterAllFilter drops every row, so the body is
    // intentionally empty — we only need the scan to run to completion.
    while (scanner.next() != null) {
    }
  }
  verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
    tableName, new Boolean[] { true });
  util.deleteTable(tableName);
  table.close();
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:23,代碼來源:TestRegionObserverInterface.java

示例14: find

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Executes each scan in {@code scanList} against the table in order and
 * collects the value the extractor derives from each scanner.
 *
 * @param tableName table to scan
 * @param scanList  scans to execute, in order
 * @param action    extractor applied to each scan's ResultScanner
 * @return one extracted value per scan, in scan order
 */
@Override
public <T> List<T> find(TableName tableName, final List<Scan> scanList, final
ResultsExtractor<T> action) {
    assertAccessAvailable();
    return execute(tableName, new TableCallback<List<T>>() {
        @Override
        public List<T> doInTable(Table table) throws Throwable {
            List<T> extracted = new ArrayList<>(scanList.size());
            for (Scan scan : scanList) {
                // try-with-resources releases each scanner even if the
                // extractor throws.
                try (ResultScanner scanner = table.getScanner(scan)) {
                    extracted.add(action.extractData(scanner));
                }
            }
            return extracted;
        }
    });
}
 
開發者ID:fchenxi,項目名稱:easyhbase,代碼行數:22,代碼來源:HbaseTemplate2.java

示例15: splitScan

import org.apache.hadoop.hbase.client.ResultScanner; //導入依賴的package包/類
/**
 * Expands the original scan into one distributed scan per key bucket and
 * opens a scanner for each. If any scanner fails to open, every scanner
 * opened so far is closed before the exception propagates.
 *
 * @param table             table to open scanners on
 * @param originalScan      scan to distribute
 * @param rowKeyDistributor computes the distributed scans
 * @return one open scanner per distributed scan
 * @throws IOException if opening any scanner fails
 */
private ResultScanner[] splitScan(Table table, Scan originalScan, AbstractRowKeyDistributor
        rowKeyDistributor) throws IOException {
    Scan[] scans = rowKeyDistributor.getDistributedScans(originalScan);
    // Give each distributed scan a unique id suffix; other properties were
    // already set upon construction.
    for (int idx = 0; idx < scans.length; idx++) {
        scans[idx].setId(scans[idx].getId() + "-" + idx);
    }

    ResultScanner[] scanners = new ResultScanner[scans.length];
    boolean allOpened = false;
    try {
        for (int idx = 0; idx < scans.length; idx++) {
            scanners[idx] = table.getScanner(scans[idx]);
        }
        allOpened = true;
    } finally {
        if (!allOpened) {
            // Partial failure: release everything opened so far.
            closeScanner(scanners);
        }
    }
    return scanners;
}
 
開發者ID:fchenxi,項目名稱:easyhbase,代碼行數:25,代碼來源:HbaseTemplate2.java


注:本文中的org.apache.hadoop.hbase.client.ResultScanner類示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。