

Java HTable.getScanner Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HTable.getScanner, drawn from open-source projects. If you are wondering what HTable.getScanner does, how to call it, or what real-world usage looks like, the curated snippets below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.hbase.client.HTable.


The following presents 15 code examples of the HTable.getScanner method, sorted by popularity by default.
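
Before the examples, here is a minimal sketch of the pattern they all share: build a Scan, obtain a ResultScanner from the table, iterate over the Results, and close the scanner. The table name "my_table" and column family "cf" are placeholders for this sketch, not names taken from any example below.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class GetScannerSketch {
  public static void main(String[] args) throws IOException {
    HTable table = new HTable(HBaseConfiguration.create(), "my_table"); // placeholder table name
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf")); // placeholder column family
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result r : scanner) { // ResultScanner is Iterable<Result>
        System.out.println(Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close(); // scanners hold server-side resources; always close them
      table.close();
    }
  }
}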

Example 1: getScanResult

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
private List<Cell> getScanResult(byte[] startRow, byte[] stopRow, HTable ht) throws IOException {
  Scan scan = new Scan();
  scan.setMaxVersions();
  if(!Bytes.toString(startRow).isEmpty()) {
    scan.setStartRow(startRow);
  }
  if(!Bytes.toString(stopRow).isEmpty()) {
    scan.setStopRow(stopRow);
  }
  ResultScanner scanner = ht.getScanner(scan);
  List<Cell> kvList = new ArrayList<Cell>();
  Result r;
  while ((r = scanner.next()) != null) {
    for (Cell kv : r.listCells()) {
      kvList.add(kv);
    }
  }
  return kvList;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 20 | Source: TestMultiRowRangeFilter.java

Example 2: QueryByCondition2

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void QueryByCondition2(String tableName) {

    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa")); // match rows where column1 equals "aaa"
        Scan s = new Scan();
        s.setFilter(filter);
        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column: " + new String(keyValue.getFamily())
                        + " ==== value: " + new String(keyValue.getValue()));
            }
        }
        rs.close(); // release the scanner's server-side resources
    } catch (Exception e) {
        e.printStackTrace();
    }

}
 
Developer: yjp123456 | Project: SparkDemo | Lines: 24 | Source: MyClass.java
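
HTablePool, used in this example, was later deprecated and removed from HBase in favor of the Connection/Table API. A hedged sketch of the same query against that newer API might look like the following; the table and column names are carried over from the example above, and the configuration is assumed to come from HBaseConfiguration.create().

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public static void queryByConditionModern(String tableName) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf(tableName))) {
        Filter filter = new SingleColumnValueFilter(
                Bytes.toBytes("column1"), null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
        Scan s = new Scan();
        s.setFilter(filter);
        try (ResultScanner rs = table.getScanner(s)) { // auto-closed by try-with-resources
            for (Result r : rs) {
                System.out.println("rowkey: " + Bytes.toString(r.getRow()));
            }
        }
    }
}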

Example 3: getAllRecord

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
/**
 * Gets all records of every row from an existing HBase table and prints them.
 * @method getAllRecord
 * @param myHbaseBtableName name of the HBase table to scan
 * @return void; results are printed to standard output
 **/
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    if (hbaseBSs != null) hbaseBSs.close();
    // close the scanner; note the HTable itself is never closed in this example
  }
}
 
Developer: PacktPublishing | Project: HBase-High-Performance-Cookbook | Lines: 31 | Source: HBaseRegularClient.java

Example 4: setup

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.operatorContext = context;
  this.outputMutator = output;
  familyVectorMap = new HashMap<String, MapVector>();

  try {
    // Add Vectors to output in the order specified when creating reader
    for (SchemaPath column : getColumns()) {
      if (column.equals(ROW_KEY_PATH)) {
        MaterializedField field = MaterializedField.create(column, ROW_KEY_TYPE);
        rowKeyVector = outputMutator.addField(field, VarBinaryVector.class);
      } else {
        getOrCreateFamilyVector(column.getRootSegment().getPath(), false);
      }
    }
    logger.debug("Opening scanner for HBase table '{}', Zookeeper quorum '{}', port '{}', znode '{}'.",
        hbaseTableName, hbaseConf.get(HConstants.ZOOKEEPER_QUORUM),
        hbaseConf.get(HBASE_ZOOKEEPER_PORT), hbaseConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
    hTable = new HTable(hbaseConf, hbaseTableName);
    resultScanner = hTable.getScanner(hbaseScan);
  } catch (SchemaChangeException | IOException e) {
    throw new ExecutionSetupException(e);
  }
}
 
Developer: skhalifa | Project: QDrill | Lines: 26 | Source: HBaseRecordReader.java

Example 5: countHBaseTable

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
protected int countHBaseTable(String tableName, String colFamily)
    throws IOException {
  int count = 0;
  HTable table = new HTable(new Configuration(
      hbaseTestUtil.getConfiguration()), Bytes.toBytes(tableName));
  try {
    ResultScanner scanner = table.getScanner(Bytes.toBytes(colFamily));
    for(Result result = scanner.next();
        result != null;
        result = scanner.next()) {
      count++;
    }
  } finally {
    table.close();
  }
  return count;
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 18 | Source: HBaseTestCase.java
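
Example 5 passes a bare column family to getScanner(byte[] family), one of HTable's convenience overloads. A sketch of the equivalent explicit Scan, reusing the example's table and colFamily variables, would be:

// Equivalent to table.getScanner(Bytes.toBytes(colFamily)):
Scan scan = new Scan();
scan.addFamily(Bytes.toBytes(colFamily)); // restrict the scan to one column family
ResultScanner scanner = table.getScanner(scan);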

Example 6: waitUntilAllRegionsAssigned

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
private static void waitUntilAllRegionsAssigned()
throws IOException {
  HTable meta = new HTable(TEST_UTIL.getConfiguration(), TableName.META_TABLE_NAME);
  while (true) {
    int rows = 0;
    Scan scan = new Scan();
    scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
    ResultScanner s = meta.getScanner(scan);
    for (Result r = null; (r = s.next()) != null;) {
      byte [] b =
        r.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
      if (b == null || b.length <= 0) {
        break;
      }
      rows++;
    }
    s.close();
    // If I get to here and all rows have a Server, then all have been assigned.
    if (rows >= countOfRegions) {
      break;
    }
    LOG.info("Found=" + rows);
    Threads.sleep(1000);
  }
  meta.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 27 | Source: TestZKBasedOpenCloseRegion.java

Example 7: testHBASE14489

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test(timeout = 300000)
public void testHBASE14489() throws IOException {
  TableName tableName = TableName.valueOf("testHBASE14489");
  HTable table = util.createTable(tableName, new byte[][] { A });
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  table.put(put);

  Scan s = new Scan();
  s.setFilter(new FilterAllFilter());
  ResultScanner scanner = table.getScanner(s);
  try {
    for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
      // drain the scanner; FilterAllFilter filters out every row, so nothing is returned
    }
  } finally {
    scanner.close();
  }
  verifyMethodResult(SimpleRegionObserver.class, new String[] { "wasScannerFilterRowCalled" },
    tableName, new Boolean[] { true });
  util.deleteTable(tableName);
  table.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 23 | Source: TestRegionObserverInterface.java

Example 8: deleteTableData

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
/**
 * Provide an existing table name to truncate.
 * Scans the table and issues a delete for each row read.
 * @param tableName existing table
 * @return an HTable for the now-empty table
 * @throws IOException
 */
public HTable deleteTableData(TableName tableName) throws IOException {
  HTable table = new HTable(getConfiguration(), tableName);
  Scan scan = new Scan();
  ResultScanner resScan = table.getScanner(scan);
  for(Result res : resScan) {
    Delete del = new Delete(res.getRow());
    table.delete(del);
  }
  resScan = table.getScanner(scan);
  resScan.close();
  return table;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 20 | Source: HBaseTestingUtility.java
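
This helper issues one delete call per row. As a hedged alternative sketch (not part of HBaseTestingUtility), the deletes can be collected and sent in a single batch via HTable.delete(List<Delete>):

public HTable deleteTableDataBatched(TableName tableName) throws IOException {
  HTable table = new HTable(getConfiguration(), tableName);
  List<Delete> deletes = new ArrayList<Delete>();
  ResultScanner resScan = table.getScanner(new Scan());
  for (Result res : resScan) {
    deletes.add(new Delete(res.getRow()));
  }
  resScan.close();
  table.delete(deletes); // one batched call instead of one round trip per row
  return table;
}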

Example 9: addToEachStartKey

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
private static int addToEachStartKey(final int expected) throws IOException {
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(),
      TableName.META_TABLE_NAME);
  int rows = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner s = meta.getScanner(scan);
  for (Result r = null; (r = s.next()) != null;) {
    HRegionInfo hri = HRegionInfo.getHRegionInfo(r);
    if (hri == null) break;
    if(!hri.getTable().equals(TABLENAME)) {
      continue;
    }
    // If start key, add 'aaa'.
    byte [] row = getStartKey(hri);
    Put p = new Put(row);
    p.setDurability(Durability.SKIP_WAL);
    p.add(getTestFamily(), getTestQualifier(), row);
    t.put(p);
    rows++;
  }
  s.close();
  Assert.assertEquals(expected, rows);
  t.close();
  meta.close();
  return rows;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 29 | Source: TestZKBasedOpenCloseRegion.java

Example 10: scanMeta

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
/**
 * Dumps hbase:meta table info
 *
 * @return # of entries in meta.
 */
protected int scanMeta() throws IOException {
  int count = 0;
  HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(new Scan());
  LOG.info("Table: " + Bytes.toString(meta.getTableName()));
  for (Result res : scanner) {
    LOG.info(Bytes.toString(res.getRow()));
    count++;
  }
  meta.close();
  return count;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 18 | Source: OfflineMetaRebuildTestCore.java

Example 11: testBulkLoad

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
@Test
public void testBulkLoad() throws Exception {
  TableName tableName = TableName.valueOf("testBulkLoad");
  long l = System.currentTimeMillis();
  HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  createTable(admin, tableName);
  Scan scan = createScan();
  final HTable table = init(admin, l, scan, tableName);
  // use bulkload
  final Path hfilePath = writeToHFile(l, "/temp/testBulkLoad/", "/temp/testBulkLoad/col/file",
    false);
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("hbase.mapreduce.bulkload.assign.sequenceNumbers", true);
  final LoadIncrementalHFiles bulkload = new LoadIncrementalHFiles(conf);
  bulkload.doBulkLoad(hfilePath, table);
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  result = scanAfterBulkLoad(scanner, result, "version2");
  Put put0 = new Put(Bytes.toBytes("row1"));
  put0.add(new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l, Bytes
      .toBytes("version3")));
  table.put(put0);
  admin.flush(tableName);
  scanner = table.getScanner(scan);
  result = scanner.next();
  while (result != null) {
    List<KeyValue> kvs = result.getColumn(Bytes.toBytes("col"), Bytes.toBytes("q"));
    for (KeyValue _kv : kvs) {
      if (Bytes.toString(_kv.getRow()).equals("row1")) {
        System.out.println(Bytes.toString(_kv.getRow()));
        System.out.println(Bytes.toString(_kv.getQualifier()));
        System.out.println(Bytes.toString(_kv.getValue()));
        Assert.assertEquals("version3", Bytes.toString(_kv.getValue()));
      }
    }
    result = scanner.next();
  }
  scanner.close();
  table.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 41 | Source: TestScannerWithBulkload.java

Example 12: getResultsSize

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
private int getResultsSize(HTable ht, Scan scan) throws IOException {
  ResultScanner scanner = ht.getScanner(scan);
  List<Cell> results = new ArrayList<Cell>();
  Result r;
  while ((r = scanner.next()) != null) {
    for (Cell kv : r.listCells()) {
      results.add(kv);
    }
  }
  return results.size();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 12 | Source: TestMultiRowRangeFilter.java
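
This helper, like Example 1 from the same test class, never closes its scanner. Since ResultScanner implements Closeable, a hedged rewrite using try-with-resources would be:

private int getResultsSize(HTable ht, Scan scan) throws IOException {
  int size = 0;
  try (ResultScanner scanner = ht.getScanner(scan)) { // scanner is closed automatically
    for (Result r : scanner) {
      size += r.listCells().size();
    }
  }
  return size;
}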

Example 13: assertAllOnLine

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
private void assertAllOnLine(final HTable t) throws IOException {
  NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
  for (Map.Entry<HRegionInfo, ServerName> e: regions.entrySet()) {
    byte [] startkey = e.getKey().getStartKey();
    Scan s = new Scan(startkey);
    ResultScanner scanner = t.getScanner(s);
    Result r = scanner.next();
    org.junit.Assert.assertTrue(r != null && r.size() > 0);
    scanner.close();
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 12 | Source: TestLoadAndSwitchEncodeOnDisk.java

Example 14: QueryByCondition3

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void QueryByCondition3(String tableName) {

    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);

        List<Filter> filters = new ArrayList<Filter>();

        Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes("column1"),
                null, CompareOp.EQUAL, Bytes.toBytes("aaa"));
        filters.add(filter1);

        Filter filter2 = new SingleColumnValueFilter(Bytes.toBytes("column2"),
                null, CompareOp.EQUAL, Bytes.toBytes("bbb"));
        filters.add(filter2);

        Filter filter3 = new SingleColumnValueFilter(Bytes.toBytes("column3"),
                null, CompareOp.EQUAL, Bytes.toBytes("ccc"));
        filters.add(filter3);

        // all three filters must match (FilterList defaults to MUST_PASS_ALL)
        FilterList filterList1 = new FilterList(filters);

        Scan scan = new Scan();
        scan.setFilter(filterList1);
        ResultScanner rs = table.getScanner(scan);
        for (Result r : rs) {
            System.out.println("rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("column: " + new String(keyValue.getFamily())
                        + " ==== value: " + new String(keyValue.getValue()));
            }
        }
        rs.close();

    } catch (Exception e) {
        e.printStackTrace();
    }

}
 
Developer: yjp123456 | Project: SparkDemo | Lines: 43 | Source: MyClass.java

Example 15: main

import org.apache.hadoop.hbase.client.HTable; // import the package/class this method depends on
public static void main(String[] args) {
	try {
		Configuration conf = new Configuration();
		conf.set("fs.defaultFS", "hdfs://hadoop1:8020");
		conf.set("yarn.resourcemanager.hostname", "hadoop1");
		conf.set("hbase.zookeeper.quorum", "hadoop1,hadoop2,hadoop3");
		conf = HBaseConfiguration.create(conf);
		HTable table = new HTable(conf, "event_logs".getBytes());

		String date = "2016-03-23";
		long startDate = TimeUtil.parseString2Long(date);
		long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;
		System.out.println();
		Scan scan = new Scan();
		// define the start and stop rowkeys for the HBase scan
		scan.setStartRow(Bytes.toBytes("" + startDate));
		scan.setStopRow(Bytes.toBytes("" + endDate));

		FilterList filterList = new FilterList();
		// filter the data: only analyze launch events
		filterList.addFilter(new SingleColumnValueFilter(Bytes.toBytes(EventLogConstants.EVENT_LOGS_FAMILY_NAME),
				Bytes.toBytes(EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME), CompareOp.EQUAL,
				Bytes.toBytes(EventEnum.LAUNCH.alias)));
		// define the column names the mapper needs to fetch
		String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_EVENT_NAME,
				EventLogConstants.LOG_COLUMN_NAME_UUID, EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME,
				EventLogConstants.LOG_COLUMN_NAME_PLATFORM, EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME,
				EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION };
		// scan.addColumn(family, qualifier)
		filterList.addFilter(getColumnFilter(columns));

		scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
		scan.setFilter(filterList);

		ResultScanner ress = table.getScanner(scan);
		for (Result res : ress) {
			Cell cell = res.getColumnLatestCell("info".getBytes(),
					EventLogConstants.LOG_COLUMN_NAME_UUID.getBytes());
			System.out.println(new String(CellUtil.cloneValue(cell)));
		}
		ress.close();
	} catch (Exception e) {
		// TODO Auto-generated catch block
		e.printStackTrace();
	}
}
 
Developer: liuhaozzu | Project: big_data | Lines: 47 | Source: TestHbase.java

