当前位置: 首页>>代码示例>>Java>>正文


Java ResultScanner类代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.client.ResultScanner的典型用法代码示例。如果您正苦于以下问题:Java ResultScanner类的具体用法?Java ResultScanner怎么用?Java ResultScanner使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


ResultScanner类属于org.apache.hadoop.hbase.client包,在下文中一共展示了ResultScanner类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: verify

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Scans the whole table (one version per cell) and asserts that each stored
 * boolean cell value matches the expectation registered in TIMESTAMP for
 * that cell's timestamp.
 *
 * @param table the table to scan
 * @throws IOException if the scan fails
 */
private void verify(final Table table) throws IOException {
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, COLUMN_NAME);
  scan.setMaxVersions(1);
  // try-with-resources guarantees the scanner is released even when an
  // assertion or the scan itself throws (the original leaked it then).
  try (ResultScanner scanner = table.getScanner(scan)) {
    for (Result r : scanner) {
      for (Cell kv : r.listCells()) {
        log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv))
            + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv))
            + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv)));
        // presumably TIMESTAMP maps timestamp -> expected Boolean; verify against writer
        org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()),
          (Boolean) Bytes.toBoolean(CellUtil.cloneValue(kv)));
      }
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:17,代码来源:TestTimeRangeMapRed.java

示例2: QueryAll

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Dumps every row of the given table to stdout: the row key followed by
 * each cell's column family and value.
 *
 * NOTE(review): new String(byte[]) uses the platform default charset; if
 * the stored data is UTF-8 this only works when the platform default
 * matches — confirm before relying on the printed text.
 *
 * @param tableName name of the table to dump
 */
public static void QueryAll(String tableName) {
    try {
        HTableInterface table = conn.getTable(tableName);
        try {
            ResultScanner rs = table.getScanner(new Scan());
            try {
                for (Result r : rs) {
                    System.out.println("rowkey:" + new String(r.getRow()));
                    for (KeyValue keyValue : r.raw()) {
                        System.out.println("column:" + new String(keyValue.getFamily())
                                + "====value:" + new String(keyValue.getValue()));
                    }
                }
            } finally {
                rs.close(); // original never closed the scanner
            }
        } finally {
            table.close(); // close the table even when the scan throws
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
开发者ID:yjp123456,项目名称:SparkDemo,代码行数:18,代码来源:MyClass.java

示例3: QueryByCondition2

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Scans the given table and prints every row whose column "column1"
 * (any qualifier in that family) equals the value "aaa".
 *
 * NOTE(review): the HTablePool created here is never shut down; it holds a
 * pool of up to 1000 table handles. Consider hoisting it to a shared field.
 *
 * @param tableName table to query
 */
public static void QueryByCondition2(String tableName) {
    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        try {
            // Select rows where column1 == "aaa".
            Filter filter = new SingleColumnValueFilter(Bytes
                    .toBytes("column1"), null, CompareOp.EQUAL, Bytes
                    .toBytes("aaa"));
            Scan s = new Scan();
            s.setFilter(filter);
            ResultScanner rs = table.getScanner(s);
            try {
                for (Result r : rs) {
                    System.out.println("获得到rowkey:" + new String(r.getRow()));
                    for (KeyValue keyValue : r.raw()) {
                        System.out.println("列:" + new String(keyValue.getFamily())
                                + "====值:" + new String(keyValue.getValue()));
                    }
                }
            } finally {
                rs.close(); // original leaked the scanner
            }
        } finally {
            table.close(); // returns the pooled table handle
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
开发者ID:yjp123456,项目名称:SparkDemo,代码行数:24,代码来源:MyClass.java

示例4: fullScan

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Performs a full scan of a catalog table.
 * @param connection connection we're using
 * @param visitor Visitor invoked against each row.
 * @param startrow Where to start the scan. Pass null if want to begin scan
 * at first row.
 * <code>hbase:meta</code>, the default (pass false to scan hbase:meta)
 * @throws IOException
 */
public static void fullScan(Connection connection,
  final Visitor visitor, final byte [] startrow)
throws IOException {
  Scan scan = new Scan();
  if (startrow != null) scan.setStartRow(startrow);
  if (startrow == null) {
    // Unbounded scan of meta: raise caching from the configured value so we
    // don't round-trip once per row.
    int caching = connection.getConfiguration()
        .getInt(HConstants.HBASE_META_SCANNER_CACHING, 100);
    scan.setCaching(caching);
  }
  scan.addFamily(HConstants.CATALOG_FAMILY);
  Table metaTable = getMetaHTable(connection);
  // try-with-resources replaces the manual null-checked close of the scanner;
  // the meta table is still closed unconditionally in finally.
  try (ResultScanner scanner = metaTable.getScanner(scan)) {
    Result data;
    while ((data = scanner.next()) != null) {
      if (data.isEmpty()) continue;
      // Break if visit returns false.
      if (!visitor.visit(data)) break;
    }
  } finally {
    metaTable.close();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:36,代码来源:MetaTableAccessor.java

示例5: waitUntilAllRegionsAssigned

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Blocks until every region of the test table has a server recorded in
 * hbase:meta. Polls meta once a second, counting rows that carry a
 * non-empty server qualifier; returns once that count reaches
 * countOfRegions.
 *
 * @throws IOException if reading meta fails
 */
private static void waitUntilAllRegionsAssigned()
throws IOException {
  HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  try {
    while (true) {
      int rows = 0;
      Scan scan = new Scan();
      scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      ResultScanner s = meta.getScanner(scan);
      try {
        for (Result r = null; (r = s.next()) != null;) {
          byte [] b =
            r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
          if (b == null || b.length <= 0) {
            // A row without a server: not everything is assigned yet.
            break;
          }
          rows++;
        }
      } finally {
        s.close(); // original leaked the scanner when next() threw
      }
      // If I get to here and all rows have a Server, then all have been assigned.
      if (rows >= countOfRegions) {
        break;
      }
      LOG.info("Found=" + rows);
      Threads.sleep(1000);
    }
  } finally {
    meta.close(); // original leaked meta on any IOException above
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:27,代码来源:TestZKBasedOpenCloseRegion.java

示例6: testEquivalenceOfScanResults

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Asserts that two scans over the same table produce identical result
 * sequences: pairwise-equal Results and the same total length.
 *
 * @param table table both scans run against
 * @param scan1 first scan
 * @param scan2 second scan, expected to be equivalent to the first
 * @throws Exception if scanning or comparison fails
 */
public void testEquivalenceOfScanResults(Table table, Scan scan1, Scan scan2) throws Exception {
  ResultScanner scanner1 = table.getScanner(scan1);
  ResultScanner scanner2 = table.getScanner(scan2);
  try {
    Result r1 = null;
    Result r2 = null;
    int count = 0;

    while ((r1 = scanner1.next()) != null) {
      r2 = scanner2.next();

      assertTrue(r2 != null);
      compareResults(r1, r2, "Comparing result #" + count);
      count++;
    }

    // scanner2 must be exhausted too, otherwise the scans differ in length.
    r2 = scanner2.next();
    assertTrue("r2: " + r2 + " Should be null", r2 == null);
  } finally {
    // Close in finally so a failed assertion doesn't leak the scanners
    // (the original skipped close() on any failure).
    scanner1.close();
    scanner2.close();
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:23,代码来源:TestPartialResultsFromClientSide.java

示例7: familyFilter

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Family-filter example: scans the table keeping only cells whose column
 * family starts with the binary prefix "mm", then prints up to count rows.
 *
 * NOTE(review): the rowFamily parameter is only used by the commented-out
 * variants below; the active filter hard-codes the "mm" prefix.
 *
 * @param tableName table name
 * @param rowFamily column family (unused by the active filter)
 * @param count     maximum number of rows to fetch and print
 */
public void familyFilter(String tableName, String rowFamily, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Alternative family-filter comparators:
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowFamily))));// exact binary compare
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));// substring match
    scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("mm".getBytes())));// binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    // try-with-resources closes the scanner deterministically (original leaked it).
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
开发者ID:mumuhadoop,项目名称:mumu-hbase,代码行数:27,代码来源:HBaseFilterOperation.java

示例8: qualifierFilter

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Qualifier-filter example: scans the table keeping only cells whose column
 * qualifier equals columnName, then prints up to count rows.
 *
 * @param tableName  table name
 * @param columnName column qualifier to match exactly
 * @param count      maximum number of rows to fetch and print
 */
public void qualifierFilter(String tableName, String columnName, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Active filter: exact binary comparison on the qualifier.
    scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnName))));// exact binary compare
    // Alternative comparators:
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));// substring match
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("m".getBytes())));// binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    // try-with-resources closes the scanner deterministically (original leaked it).
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
开发者ID:mumuhadoop,项目名称:mumu-hbase,代码行数:27,代码来源:HBaseFilterOperation.java

示例9: dependentColumnFilter

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Dependent-column filter example (documented intent: fetch columns that
 * share the reference column's timestamp).
 *
 * NOTE(review): the body actually installs a PrefixFilter over an empty
 * prefix — which matches every row — and never uses columnFamily,
 * qualifier, or columnValue. It does not implement the documented
 * DependentColumnFilter behavior. Behavior left unchanged; confirm intent
 * before relying on this method.
 *
 * @param tableName    table name
 * @param columnFamily reference column family (currently unused)
 * @param qualifier    reference column qualifier (currently unused)
 * @param columnValue  reference column value (currently unused)
 * @param count        maximum number of rows to fetch and print
 */
public void dependentColumnFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    scan.setFilter(new PrefixFilter(Bytes.toBytes("")));
    scan.setCaching(10);
    scan.setBatch(10);
    // try-with-resources closes the scanner deterministically (original leaked it).
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
开发者ID:mumuhadoop,项目名称:mumu-hbase,代码行数:25,代码来源:HBaseFilterOperation.java

示例10: SingleColumnValueExcludeFilter

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Single-column-value-exclude filter example: returns rows whose reference
 * column (columnFamily:qualifier) equals columnValue, but strips the
 * reference column itself from the returned cells.
 *
 * @param tableName    table name
 * @param columnFamily reference column family
 * @param qualifier    reference column qualifier
 * @param columnValue  value the reference column must equal
 * @param count        maximum number of rows to fetch and print
 */
public void SingleColumnValueExcludeFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    SingleColumnValueExcludeFilter singleColumnValueFilter = new SingleColumnValueExcludeFilter(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier), CompareFilter.CompareOp.EQUAL, Bytes.toBytes(columnValue));
    //singleColumnValueFilter.setFilterIfMissing(true);// rows missing the reference column are NOT filtered by default
    singleColumnValueFilter.setLatestVersionOnly(true);// compare against the newest version only
    scan.setFilter(singleColumnValueFilter);
    scan.setCaching(10);
    //scan.setBatch(10);
    // try-with-resources closes the scanner deterministically (original leaked it).
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
开发者ID:mumuhadoop,项目名称:mumu-hbase,代码行数:28,代码来源:HBaseFilterOperation.java

示例11: testNoPartialResultsWhenRowFilterPresent

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * When a scan has a filter where {@link org.apache.hadoop.hbase.filter.Filter#hasFilterRow()} is
 * true, the scanner should not return partial results. The scanner cannot return partial results
 * because the entire row needs to be read for the include/exclude decision to be made
 */
@Test
public void testNoPartialResultsWhenRowFilterPresent() throws Exception {
  Scan scan = new Scan();
  scan.setMaxResultSize(1);
  scan.setAllowPartialResults(true);
  // If a filter hasFilter() is true then partial results should not be returned else filter
  // application server side would break.
  scan.setFilter(new RandomRowFilter(1.0f));
  // try-with-resources: the original leaked the scanner if assertFalse threw.
  try (ResultScanner scanner = TABLE.getScanner(scan)) {
    Result r = null;
    while ((r = scanner.next()) != null) {
      assertFalse(r.isPartial());
    }
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:23,代码来源:TestPartialResultsFromClientSide.java

示例12: copyTable

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Copies one column (ColumnFamily:ColumnName, latest version only) of every
 * row into a freshly created table named newTableName.
 *
 * NOTE(review): the oldTableName parameter is never used — rows are read
 * from the shared hbase_table handle, which is assumed to already point at
 * the source table. TODO confirm against callers.
 *
 * @param oldTableName source table name (unused, see note)
 * @param newTableName destination table, created by CreateNewTable
 * @param ColumnFamily column family to copy
 * @param ColumnName   column qualifier to copy
 * @throws IOException if scanning or writing fails
 */
public static void copyTable(String oldTableName, String newTableName,String ColumnFamily, String ColumnName)throws IOException {
	if(CreateNewTable(newTableName))
		logger.info("创建表"+newTableName+"表成功");
	else{
		logger.info("创建表"+newTableName+"表失败");
	}
	Scan s = new Scan();
	s.addColumn(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
	s.setMaxVersions(1);
	s.setCacheBlocks(false);
	ResultScanner rs = hbase_table.getScanner(s);
	try {
		HTableInterface hbase_table_new = conn.getTable(newTableName);
		try {
			for (Result r : rs) {
				byte[] key = r.getRow();
				byte[] value = r.getValue(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName));
				Put put = new Put(key);
				put.add(Bytes.toBytes(ColumnFamily), Bytes.toBytes(ColumnName), value);
				hbase_table_new.put(put);
			}
		} finally {
			hbase_table_new.close(); // original leaked the table on exception
		}
	} finally {
		rs.close(); // original leaked the scanner on exception
	}
}
 
开发者ID:ItGql,项目名称:SparkIsax,代码行数:29,代码来源:HBaseUtils.java

示例13: testHBASE14489

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
  TableName tableName = TableName.valueOf("testHBASE14489");
  HTable table = util.createTable(tableName, new byte[][] { A });

  // Write a single cell so the filtered scan below has work to do.
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Scan scan = new Scan();
  scan.setFilter(new FilterAllFilter());
  // Drain the scanner; every row is filtered out, we only care that the
  // coprocessor's filterRow hook fires.
  try (ResultScanner scanner = table.getScanner(scan)) {
    while (scanner.next() != null) {
      // intentionally empty: just exhaust the scan
    }
  }

  verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
    tableName, new Boolean[] { true });
  util.deleteTable(tableName);
  table.close();
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:23,代码来源:TestRegionObserverInterface.java

示例14: find

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
@Override
public <T> List<T> find(TableName tableName, final List<Scan> scanList, final
ResultsExtractor<T> action) {
    assertAccessAvailable();
    // Run every scan against the same table and collect one extracted value
    // per scan, in the order the scans were given.
    return execute(tableName, new TableCallback<List<T>>() {
        @Override
        public List<T> doInTable(Table table) throws Throwable {
            final List<T> extracted = new ArrayList<>(scanList.size());
            for (Scan eachScan : scanList) {
                // try-with-resources closes each scanner even if extraction throws
                try (ResultScanner scanner = table.getScanner(eachScan)) {
                    extracted.add(action.extractData(scanner));
                }
            }
            return extracted;
        }
    });
}
 
开发者ID:fchenxi,项目名称:easyhbase,代码行数:22,代码来源:HbaseTemplate2.java

示例15: splitScan

import org.apache.hadoop.hbase.client.ResultScanner; //导入依赖的package包/类
/**
 * Expands a logical scan into one physical scan per key-distribution bucket
 * and opens a scanner for each. If opening any scanner fails, every scanner
 * opened so far is closed before the exception propagates.
 */
private ResultScanner[] splitScan(Table table, Scan originalScan, AbstractRowKeyDistributor
        rowKeyDistributor) throws IOException {
    Scan[] distributedScans = rowKeyDistributor.getDistributedScans(originalScan);
    // other properties are already set upon construction
    for (int idx = 0; idx < distributedScans.length; idx++) {
        Scan each = distributedScans[idx];
        each.setId(each.getId() + "-" + idx);
    }

    ResultScanner[] openedScanners = new ResultScanner[distributedScans.length];
    boolean allOpened = false;
    try {
        for (int idx = 0; idx < openedScanners.length; idx++) {
            openedScanners[idx] = table.getScanner(distributedScans[idx]);
        }
        allOpened = true;
    } finally {
        if (!allOpened) {
            // Partial failure: release whatever we managed to open.
            closeScanner(openedScanners);
        }
    }
    return openedScanners;
}
 
开发者ID:fchenxi,项目名称:easyhbase,代码行数:25,代码来源:HbaseTemplate2.java


注:本文中的org.apache.hadoop.hbase.client.ResultScanner类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。