

Java HTable.getScanner Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HTable.getScanner, drawn from open-source projects. If you are unsure what HTable.getScanner does, how to call it, or what real-world uses look like, the selected code examples below should help. You can also explore the other usage examples for org.apache.hadoop.hbase.client.HTable.


The following presents 15 code examples of the HTable.getScanner method, sorted by popularity.
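
Every example below follows the same core pattern: build a Scan, obtain a ResultScanner from HTable.getScanner, iterate over the Results, and close the scanner. Here is a minimal, self-contained sketch of that pattern; the table name "mytable" and column family "cf" are placeholders, and the HTable constructor follows the older client API used throughout this article:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class GetScannerSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable"); // placeholder table name
    try {
      Scan scan = new Scan();
      scan.addFamily(Bytes.toBytes("cf")); // placeholder column family
      ResultScanner scanner = table.getScanner(scan);
      try {
        for (Result r : scanner) { // ResultScanner is Iterable<Result>
          System.out.println("row: " + Bytes.toString(r.getRow()));
        }
      } finally {
        scanner.close(); // scanners hold server-side resources; always close them
      }
    } finally {
      table.close();
    }
  }
}

Note that getScanner is also overloaded to take a bare column family (byte[]) instead of a Scan, as Example 5 shows.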

Example 1: getScanResult

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
private List<Cell> getScanResult(byte[] startRow, byte[] stopRow, HTable ht) throws IOException {
  Scan scan = new Scan();
  scan.setMaxVersions();
  if(!Bytes.toString(startRow).isEmpty()) {
    scan.setStartRow(startRow);
  }
  if(!Bytes.toString(stopRow).isEmpty()) {
    scan.setStopRow(stopRow);
  }
  ResultScanner scanner = ht.getScanner(scan);
  List<Cell> kvList = new ArrayList<Cell>();
  Result r;
  while ((r = scanner.next()) != null) {
    for (Cell kv : r.listCells()) {
      kvList.add(kv);
    }
  }
  return kvList;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestMultiRowRangeFilter.java
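
Example 1 comes from TestMultiRowRangeFilter, where a single start/stop pair bounds the scan. To scan several disjoint row ranges in one pass, HBase provides MultiRowRangeFilter; below is a hedged sketch under the assumption of an already-open HTable ht and illustrative row keys:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter;
import org.apache.hadoop.hbase.filter.MultiRowRangeFilter.RowRange;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRangeScanSketch {
  // Scans rows in [a, c) and [e, g) in a single pass; ht is an open HTable.
  static void scanTwoRanges(HTable ht) throws IOException {
    List<RowRange> ranges = new ArrayList<RowRange>();
    ranges.add(new RowRange(Bytes.toBytes("a"), true, Bytes.toBytes("c"), false));
    ranges.add(new RowRange(Bytes.toBytes("e"), true, Bytes.toBytes("g"), false));
    Scan scan = new Scan();
    scan.setFilter(new MultiRowRangeFilter(ranges));
    ResultScanner scanner = ht.getScanner(scan);
    try {
      for (Result r : scanner) {
        System.out.println(Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();
    }
  }
}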

Example 2: QueryByCondition2

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
public static void QueryByCondition2(String tableName) {
    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        // match rows where the value of column1 is "aaa"
        Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
        Scan s = new Scan();
        s.setFilter(filter);
        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column: " + new String(keyValue.getFamily())
                        + " ==== value: " + new String(keyValue.getValue()));
            }
        }
        rs.close(); // release the scanner
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: yjp123456, Project: SparkDemo, Lines: 24, Source: MyClass.java
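
HTablePool and the HTable constructors used above were deprecated in HBase 1.0 and removed in 2.0. Here is a hedged sketch of the same single-column filter query against the Connection-based client API; the column name "column1" and value "aaa" are the same placeholders as above:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class QueryByConditionModern {
  public static void queryByCondition(String tableName) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Connection, Table, and ResultScanner are all Closeable
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf(tableName))) {
      Scan scan = new Scan();
      scan.setFilter(new SingleColumnValueFilter(
          Bytes.toBytes("column1"), null, CompareOp.EQUAL, Bytes.toBytes("aaa")));
      try (ResultScanner rs = table.getScanner(scan)) {
        for (Result r : rs) {
          System.out.println("rowkey: " + Bytes.toString(r.getRow()));
        }
      }
    }
  }
}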

Example 3: getAllRecord

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
/**
 * Gets and prints all records from an existing HBase table.
 * @method getAllRecord
 * @inputParameters myHbaseBtableName the name of the HBase table to scan
 * @return void; each cell is printed to stdout
 **/
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    if (hbaseBSs != null) hbaseBSs.close();
    // close the scanner; hbaseBtable itself is never closed in this example
  }
}
 
Developer: PacktPublishing, Project: HBase-High-Performance-Cookbook, Lines: 31, Source: HBaseRegularClient.java

Example 4: setup

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.operatorContext = context;
  this.outputMutator = output;
  familyVectorMap = new HashMap<String, MapVector>();

  try {
    // Add Vectors to output in the order specified when creating reader
    for (SchemaPath column : getColumns()) {
      if (column.equals(ROW_KEY_PATH)) {
        MaterializedField field = MaterializedField.create(column, ROW_KEY_TYPE);
        rowKeyVector = outputMutator.addField(field, VarBinaryVector.class);
      } else {
        getOrCreateFamilyVector(column.getRootSegment().getPath(), false);
      }
    }
    logger.debug("Opening scanner for HBase table '{}', Zookeeper quorum '{}', port '{}', znode '{}'.",
        hbaseTableName, hbaseConf.get(HConstants.ZOOKEEPER_QUORUM),
        hbaseConf.get(HBASE_ZOOKEEPER_PORT), hbaseConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
    hTable = new HTable(hbaseConf, hbaseTableName);
    resultScanner = hTable.getScanner(hbaseScan);
  } catch (SchemaChangeException | IOException e) {
    throw new ExecutionSetupException(e);
  }
}
 
Developer: skhalifa, Project: QDrill, Lines: 26, Source: HBaseRecordReader.java

Example 5: countHBaseTable

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
protected int countHBaseTable(String tableName, String colFamily)
    throws IOException {
  int count = 0;
  HTable table = new HTable(new Configuration(
      hbaseTestUtil.getConfiguration()), Bytes.toBytes(tableName));
  try {
    ResultScanner scanner = table.getScanner(Bytes.toBytes(colFamily));
    for(Result result = scanner.next();
        result != null;
        result = scanner.next()) {
      count++;
    }
  } finally {
    table.close();
  }
  return count;
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 18, Source: HBaseTestCase.java
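
Scanning full rows just to count them moves every cell over the wire. A hedged variant of the counter above asks each row for only its first cell via FirstKeyOnlyFilter and fetches rows in larger batches; the helper name and batch size are our own choices:

import java.io.IOException;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;

public class CountRowsSketch {
  // Counts rows in an open HTable while fetching only one cell per row.
  static int countRows(HTable table) throws IOException {
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter()); // return only the first KeyValue of each row
    scan.setCaching(1000); // fetch rows from the server in batches of 1000
    int count = 0;
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) {
        count++;
      }
    } finally {
      scanner.close();
    }
    return count;
  }
}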

Example 6: waitUntilAllRegionsAssigned

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
private static void waitUntilAllRegionsAssigned()
throws IOException {
  HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  while (true) {
    int rows = 0;
    Scan scan = new Scan();
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
    ResultScanner s = meta.getScanner(scan);
    for (Result r = null; (r = s.next()) != null;) {
      byte [] b =
        r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      if (b == null || b.length <= 0) {
        break;
      }
      rows++;
    }
    s.close();
    // If I get to here and all rows have a Server, then all have been assigned.
    if (rows >= countOfRegions) {
      break;
    }
    LOG.info("Found=" + rows);
    Threads.sleep(1000);
  }
  meta.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestZKBasedOpenCloseRegion.java

Example 7: testHBASE14489

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
  TableName tableName = TableName.valueOf("testHBASE14489");
  HTable table = util.createTable(tableName, new byte[][] { A });
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Scan s = new Scan();
  s.setFilter(new FilterAllFilter());
  ResultScanner scanner = table.getScanner(s);
  try {
    for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
      // drain the scanner; FilterAllFilter should filter out every row
    }
  } finally {
    scanner.close();
  }
  verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
    tableName, new Boolean[] { true });
  util.deleteTable(tableName);
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestRegionObserverInterface.java

Example 8: deleteTableData

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
/**
 * Provide an existing table name to truncate.
 * Scans the table and issues a delete for each row read.
 * @param tableName existing table
 * @return HTable handle to the (now emptied) table
 * @throws IOException
 */
public HTable deleteTableData(TableName tableName) throws IOException {
  HTable table = new HTable(getConfiguration(), tableName);
  Scan scan = new Scan();
  ResultScanner resScan = table.getScanner(scan);
  for(Result res : resScan) {
    Delete del = new Delete(res.getRow());
    table.delete(del);
  }
  resScan = table.getScanner(scan);
  resScan.close();
  return table;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: HBaseTestingUtility.java
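
Issuing one delete RPC per row, as Example 8 does, is slow for large tables; HTable.delete also accepts a list of Deletes. A hedged batched variant of the same loop is sketched below, assuming an open HTable and an arbitrary batch size:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;

public class BatchedTruncateSketch {
  // Deletes every row of an open HTable, buffering deletes into batches.
  static void deleteAllRows(HTable table) throws IOException {
    ResultScanner scanner = table.getScanner(new Scan());
    try {
      List<Delete> batch = new ArrayList<Delete>();
      for (Result res : scanner) {
        batch.add(new Delete(res.getRow()));
        if (batch.size() >= 1000) { // arbitrary batch size
          table.delete(batch); // the old API removes processed entries from the list
          batch.clear();
        }
      }
      if (!batch.isEmpty()) {
        table.delete(batch);
      }
    } finally {
      scanner.close();
    }
  }
}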

Example 9: addToEachStartKey

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
private static int addToEachStartKey(final int expected) throws IOException {
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if(!hri.getTable().equals(TABLENAME)) {
      continue;
    }
    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: TestZKBasedOpenCloseRegion.java

Example 10: scanMeta

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
/**
 * Dumps hbase:meta table info
 *
 * @return # of entries in meta.
 */
protected int scanMeta() throws IOException {
  int count = 0;
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(new Scan());
  LOG.info("Table: " + Bytes.toString(meta.getTableName()));
  for (Result res : scanner) {
    LOG.info(Bytes.toString(res.getRow()));
    count++;
  }
  meta.close();
  return count;
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: OfflineMetaRebuildTestCore.java

Example 11: testBulkLoad

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
@Test
public void testBulkLoad() throws Exception {
  TableName tableName = TableName.valueOf("testBulkLoad");
  long l = System.currentTimeMillis();
  HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  createTable(admin, tableName);
  Scan scan = createScan();
  final HTable table = init(admin, l, scan, tableName);
  // use bulkload
  final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file",
    false);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
  final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
  bulkload.doBulkLoad(hfilePath, table);
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  result = scanAfterBulkLoad(scanner, result, "version2");
  Put put0 = new Put(Bytes.toBytes("row1"));
  put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes
      .toBytes("version3")));
  table.put(put0);
  admin.flush(tableName);
  scanner = table.getScanner(scan);
  result = scanner.next();
  while (result != null) {
    List<KeyValue> kvs = result.getColumn(Bytes.toBytes("col"), Bytes.toBytes("q"));
    for (KeyValue _kv : kvs) {
      if (Bytes.toString(_kv.getRow()).equals("row1")) {
        System.out.println(Bytes.toString(_kv.getRow()));
        System.out.println(Bytes.toString(_kv.getQualifier()));
        System.out.println(Bytes.toString(_kv.getValue()));
        Assert.assertEquals("version3", Bytes.toString(_kv.getValue()));
      }
    }
    result = scanner.next();
  }
  scanner.close();
  table.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: TestScannerWithBulkload.java

Example 12: getResultsSize

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
private int getResultsSize(HTable ht, Scan scan) throws IOException {
  ResultScanner scanner = ht.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  Result r;
  while ((r = scanner.next()) != null) {
    for (Cell kv : r.listCells()) {
      results.add(kv);
    }
  }
  return results.size();
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: TestMultiRowRangeFilter.java

Example 13: assertAllOnLine

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
private void assertAllOnLine(final HTable t) throws IOException {
  NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
  for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
    byte [] startkey = e.getKey().getStartKey();
    Scan s = new Scan(startkey);
    ResultScanner scanner = t.getScanner(s);
    Result r = scanner.next();
    org.junit.Assert.assertTrue(r != null && r.size() > 0);
    scanner.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: TestLoadAndSwitchEncodeOnDisk.java

Example 14: QueryByCondition3

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
public static void QueryByCondition3(String tableName) {
    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);

        List<Filter> filters = new ArrayList<Filter>();

        Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
        filters.add(filter1);

        Filter filter2 = new SingleColumnValueFilter(Bytes.toBytes("column2"),
                null, CompareOp.EQUAL, Bytes.toBytes("bbb"));
        filters.add(filter2);

        Filter filter3 = new SingleColumnValueFilter(Bytes.toBytes("column3"),
                null, CompareOp.EQUAL, Bytes.toBytes("ccc"));
        filters.add(filter3);

        // FilterList defaults to MUST_PASS_ALL: every filter must match
        FilterList filterList1 = new FilterList(filters);

        Scan scan = new Scan();
        scan.setFilter(filterList1);
        ResultScanner rs = table.getScanner(scan);
        for (Result r : rs) {
            System.out.println("rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column: " + new String(keyValue.getFamily())
                        + " ==== value: " + new String(keyValue.getValue()));
            }
        }
        rs.close();

    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: yjp123456, Project: SparkDemo, Lines: 43, Source: MyClass.java

Example 15: main

import org.apache.hadoop.hbase.client.HTable; // import the class this example depends on
public static void main(String[] args) {
	try {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://hadoop1:8020");
		conf.set("yarn.resourcemanager.hostname", "hadoop1");
		conf.set("hbase.zookeeper.quorum", "hadoop1,hadoop2,hadoop3");
		conf = HBaseConfiguration.create(conf);
		HTable table = new HTable(conf, "event_logs".getBytes());

		String date = "2016-03-23";
		long startDate = TimeUtil.parseString2Long(date);
		long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;
		Scan scan = new Scan();
		// set the start and stop rowkeys for the HBase scan
		scan.setStartRow(Bytes.toBytes("" + startDate));
		scan.setStopRow(Bytes.toBytes("" + endDate));

		FilterList filterList = new FilterList();
		// filter the data: only analyze "launch" events
		filterList.addFilter(new SingleColumnValueFilter(Bytes.toBytes(EventLogConstants.EVENT_LOGS_FAMILY_NAME),
				Bytes.toBytes(EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME), CompareOp.EQUAL,
				Bytes.toBytes(EventEnum.LAUNCH.alias)));
		// column names the mapper needs to fetch
		String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME,
				EventLogConstants.LOG_COLUMN_NAME_UUID, EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME,
				EventLogConstants.LOG_COLUMN_NAME_PLATFORM, EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME,
				EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION };
		// scan.addColumn(family, qualifier)
		filterList.addFilter(getColumnFilter(columns));

		scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
		scan.setFilter(filterList);

		ResultScanner ress = table.getScanner(scan);
		for (Result res : ress) {
			Cell cell = res.getColumnLatestCell("info".getBytes(),
					EventLogConstants.LOG_COLUMN_NAME_UUID.getBytes());
			System.out.println(new String(CellUtil.cloneValue(cell)));
		}
		ress.close();
	} catch (Exception e) {
		e.printStackTrace();
	}
}
 
Developer: liuhaozzu, Project: big_data, Lines: 47, Source: TestHbase.java


Note: the org.apache.hadoop.hbase.client.HTable.getScanner method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their developers, and copyright in the source code remains with the original authors. For distribution and use, consult the license of the corresponding project; do not reproduce this article without permission.