Java Scan.setBatch Method Code Examples

This article collects typical usages of the Java method org.apache.hadoop.hbase.client.Scan.setBatch. If you are wondering what exactly Scan.setBatch does, or how and where to use it, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Scan.


Below are 15 code examples of Scan.setBatch, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
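Before the examples, it helps to pin down what Scan.setBatch actually controls, since it is easy to confuse with Scan.setCaching: setCaching bounds how many rows the client fetches per RPC round-trip, while setBatch bounds how many cells a single Result may contain, so one wide row can be split across several Results. The following minimal sketch illustrates this; it is not taken from the projects below, and the method name scanWithBatch, the Table handle, and the numeric settings are illustrative only.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanBatchSketch {
    // table is assumed to be an already-opened org.apache.hadoop.hbase.client.Table
    static void scanWithBatch(Table table) throws IOException {
        Scan scan = new Scan();
        scan.setCaching(100); // rows transferred per RPC (client-side scanner cache)
        scan.setBatch(5);     // at most 5 cells per Result; wide rows are split up
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                // with setBatch(5), a row holding more than 5 cells arrives
                // as several consecutive Results
                System.out.println(result);
            }
        }
    }
}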

Example 1: rowFilter

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Use a row filter to select rows whose key is greater than rowKey.
 *
 * @param tableName table name
 * @param rowKey    row key
 * @param count     number of rows to fetch
 */
public void rowFilter(String tableName, String rowKey, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Use a row filter to select rows greater than (or equal to) the given key.
    //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowKey))));// exact row key
    //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));// substring match
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("row".getBytes())));// binary prefix
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 2: familyFilter

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Use a family filter to select column families.
 *
 * @param tableName table name
 * @param rowFamily column family
 * @param count     number of rows to fetch
 */
public void familyFilter(String tableName, String rowFamily, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Use a family filter to match column family names.
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowFamily))));// exact comparison
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));// substring match
    scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("mm".getBytes())));// binary prefix
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 3: qualifierFilter

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Use a qualifier filter to select column qualifiers.
 *
 * @param tableName  table name
 * @param columnName column qualifier
 * @param count      number of rows to fetch
 */
public void qualifierFilter(String tableName, String columnName, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Use a qualifier filter to match column qualifiers.
    scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnName))));// exact comparison
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));// substring match
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("m".getBytes())));// binary prefix
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 4: valueFilter

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Use a value filter to select cells by value.
 *
 * @param tableName   table name
 * @param columnValue cell value
 * @param count       number of rows to fetch
 */
public void valueFilter(String tableName, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Use a value filter to match cell values.
    scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnValue))));// exact comparison
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));// regular expression
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));// substring match
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("mm".getBytes())));// binary prefix
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 5: dependentColumnFilter

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Dependent-column (reference-column) filter: fetch the columns that share the
 * reference column's timestamp.
 *
 * @param tableName    table name
 * @param columnFamily column family of the reference column
 * @param qualifier    qualifier of the reference column
 * @param columnValue  value of the reference column
 * @param count        number of rows to fetch
 */
public void dependentColumnFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    // Note: despite the method name, the body installs a PrefixFilter with an empty
    // prefix, which matches every row; the columnFamily/qualifier/columnValue
    // parameters go unused. An actual DependentColumnFilter filters whole rows
    // (hasFilterRow() returns true) and therefore cannot be combined with
    // setBatch -- see the sketch after this example.
    scan.setFilter(new PrefixFilter(Bytes.toBytes("")));
    scan.setCaching(10);
    scan.setBatch(10);
    try (ResultScanner scanner = table.getScanner(scan)) {
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer: mumuhadoop, Project: mumu-hbase, Lines: 25, Source: HBaseFilterOperation.java
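For reference, an actual DependentColumnFilter call would look like the minimal sketch below, reusing the parameter names from the method above. Because DependentColumnFilter returns true from hasFilterRow(), calling setBatch after installing it throws IncompatibleFilterException, which is presumably why the example falls back to a PrefixFilter.

Scan scan = new Scan();
scan.setFilter(new DependentColumnFilter(
        Bytes.toBytes(columnFamily),   // family of the reference column
        Bytes.toBytes(qualifier),      // qualifier of the reference column
        false,                         // false = keep the reference column in the results
        CompareFilter.CompareOp.EQUAL, // how cell values are compared to the reference
        new BinaryComparator(Bytes.toBytes(columnValue))));
scan.setCaching(10);
// no scan.setBatch(...) here: batching is incompatible with row-level filters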

Example 6: initScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
Scan initScan() throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  if (startTime != 0 || endTime != 0) {
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  }
  if (scanBatch > 0) {
    scan.setBatch(scanBatch);
  }
  if (versions >= 0) {
    scan.setMaxVersions(versions);
  }
  if (!isTableStartRow(startRow)) {
    scan.setStartRow(startRow);
  }
  if (!isTableEndRow(stopRow)) {
    scan.setStopRow(stopRow);
  }
  if(families != null) {
    for(String fam : families.split(",")) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  return scan;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HashTable.java

Example 7: doAction

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);

    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: IntegrationTestMTTR.java

Example 8: findStartNode

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private static CINode findStartNode(Table table, byte[] startKey) throws IOException {
  Scan scan = new Scan();
  scan.setStartRow(startKey);
  scan.setBatch(1);
  scan.addColumn(FAMILY_NAME, COLUMN_PREV);

  long t1 = System.currentTimeMillis();
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  long t2 = System.currentTimeMillis();
  scanner.close();

  if (result != null) {
    CINode node = getCINode(result, new CINode());
    System.out.printf("FSR %d %s\n", t2 - t1, Bytes.toStringBinary(node.key));
    return node;
  }

  System.out.println("FSR " + (t2 - t1));

  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: IntegrationTestBigLinkedList.java

Example 9: testScanWithLimit

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test
public void testScanWithLimit() {
  int kv_number = 0;
  try {
    Scan scan = new Scan();
    // set batch number as 2, which means each Result should contain 2 KVs at most
    scan.setBatch(2);
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes(columnFamily), Bytes.toBytes("c5"),
        CompareFilter.CompareOp.EQUAL, new SubstringComparator("2_c5"));

    // add filter after batch defined
    scan.setFilter(filter);
    Table table = openTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    // Expect to get following row
    // row2 => <f1:c1, 2_c1>, <f1:c2, 2_c2>,
    // row2 => <f1:c3, 2_c3>, <f1:c4, 2_c4>,
    // row2 => <f1:c5, 2_c5>

    for (Result result : scanner) {
      for (Cell kv : result.listCells()) {
        kv_number++;
        LOG.debug(kv_number + ". kv: " + kv);
      }
    }

    scanner.close();
    table.close();
  } catch (Exception e) {
    // no correct result is expected
    assertNotNull("No IncompatibleFilterException catched", e);
  }
  LOG.debug("check the fetched kv number");
  assertEquals("We should not get result(s) returned.", 0, kv_number);
}
 
Developer: fengchen8086, Project: ditb, Lines: 37, Source: TestFilterWithScanLimits.java
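The test above calls setBatch before installing the filter, so the incompatibility only surfaces once the scan runs and no key-values come back. If the order is reversed, Scan.setBatch itself fails fast on the client. A minimal sketch, reusing the family/qualifier names from the test:

Scan scan = new Scan();
scan.setFilter(new SingleColumnValueFilter(
        Bytes.toBytes("f1"), Bytes.toBytes("c5"),
        CompareFilter.CompareOp.EQUAL, new SubstringComparator("2_c5")));
try {
    scan.setBatch(2); // throws immediately: filter.hasFilterRow() returns true
} catch (IncompatibleFilterException e) {
    // expected: batching cannot be combined with filters that filter whole rows
}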

Example 10: testPartialResultsAndBatch

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
public void testPartialResultsAndBatch(final int batch, final int cellsPerPartialResult)
    throws Exception {
  if (LOG.isInfoEnabled()) {
    LOG.info("batch: " + batch + " cellsPerPartialResult: " + cellsPerPartialResult);
  }

  Scan scan = new Scan();
  scan.setMaxResultSize(getResultSizeForNumberOfCells(cellsPerPartialResult));
  scan.setBatch(batch);
  ResultScanner scanner = TABLE.getScanner(scan);
  Result result;
  int repCount = 0;

  // iterate every Result; declaring without calling next() first avoids
  // silently discarding the first Result of the scan
  while ((result = scanner.next()) != null) {
    assertTrue(result.rawCells() != null);

    if (result.isPartial()) {
      final String error =
          "Cells:" + result.rawCells().length + " Batch size:" + batch
              + " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount;
      assertTrue(error, result.rawCells().length <= Math.min(batch, cellsPerPartialResult));
    } else {
      assertTrue(result.rawCells().length <= batch);
    }
    repCount++;
  }

  scanner.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: TestPartialResultsFromClientSide.java
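Batching is one way to get row fragments; since HBase 1.1 a client can also opt in to size-based partial Results explicitly. The following minimal sketch assumes that API (setAllowPartialResults) and reuses the test's TABLE handle; the numeric limits are illustrative only.

Scan scan = new Scan();
scan.setMaxResultSize(1024);       // a small size limit forces row fragments
scan.setBatch(3);                  // and each Result holds at most 3 cells
scan.setAllowPartialResults(true); // accept Results that contain only part of a row
try (ResultScanner scanner = TABLE.getScanner(scan)) {
    for (Result result : scanner) {
        if (result.isPartial()) {
            // more cells of the same row will arrive in subsequent Results
        }
    }
}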

Example 11: testBatchingResultWhenRegionMove

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test
public void testBatchingResultWhenRegionMove() throws IOException {
  Table table =
      createTestTable(TableName.valueOf("testBatchingResultWhenRegionMove"), ROWS, FAMILIES,
          QUALIFIERS, VALUE);

  moveRegion(table, 1);

  Scan scan = new Scan();
  scan.setCaching(1);
  scan.setBatch(1);

  ResultScanner scanner = table.getScanner(scan);
  for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS - 1; i++) {
    scanner.next();
  }
  Result result1 = scanner.next();
  assertEquals(1, result1.rawCells().length);
  Cell c1 = result1.rawCells()[0];
  assertCell(c1, ROWS[0], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]);

  moveRegion(table, 2);

  Result result2 = scanner.next();
  assertEquals(1, result2.rawCells().length);
  Cell c2 = result2.rawCells()[0];
  assertCell(c2, ROWS[1], FAMILIES[0], QUALIFIERS[0]);

  moveRegion(table, 3);

  Result result3 = scanner.next();
  assertEquals(1, result3.rawCells().length);
  Cell c3 = result3.rawCells()[0];
  assertCell(c3, ROWS[1], FAMILIES[0], QUALIFIERS[1]);
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: TestPartialResultsFromClientSide.java

Example 12: runCheck

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * After adding data to the table, start a MapReduce job to check that the
 * linked-list chains are intact.
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
  LOG.info("Running check");
  Configuration conf = getConf();
  String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime();
  Path p = util.getDataTestDirOnTestFS(jobName);

  Job job = new Job(conf);
  job.setJarByClass(getClass());
  job.setJobName(jobName);

  job.setPartitionerClass(NaturalKeyPartitioner.class);
  job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
  job.setSortComparatorClass(CompositeKeyComparator.class);

  Scan scan = new Scan();
  scan.addFamily(CHAIN_FAM);
  scan.addFamily(SORT_FAM);
  scan.setMaxVersions(1);
  scan.setCacheBlocks(false);
  scan.setBatch(1000);

  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount != NUM_REPLICA_COUNT_DEFAULT) {
    scan.setConsistency(Consistency.TIMELINE);
  }

  TableMapReduceUtil.initTableMapperJob(
      getTablename().getName(),
      scan,
      LinkedListCheckingMapper.class,
      LinkKey.class,
      LinkChain.class,
      job
  );

  job.setReducerClass(LinkedListCheckingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);

  FileOutputFormat.setOutputPath(job, p);

  assertEquals(true, job.waitForCompletion(true));

  // Delete the files.
  util.getTestFileSystem().delete(p, true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 53, Source: IntegrationTestBulkLoad.java

Example 13: scanFromThrift

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
public static Scan scanFromThrift(TScan in) throws IOException {
  Scan out = new Scan();

  if (in.isSetStartRow())
    out.setStartRow(in.getStartRow());
  if (in.isSetStopRow())
    out.setStopRow(in.getStopRow());
  if (in.isSetCaching())
    out.setCaching(in.getCaching());
  if (in.isSetMaxVersions()) {
    out.setMaxVersions(in.getMaxVersions());
  }

  if (in.isSetColumns()) {
    for (TColumn column : in.getColumns()) {
      if (column.isSetQualifier()) {
        out.addColumn(column.getFamily(), column.getQualifier());
      } else {
        out.addFamily(column.getFamily());
      }
    }
  }

  TTimeRange timeRange = in.getTimeRange();
  if (timeRange != null &&
      timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
    out.setTimeRange(timeRange.getMinStamp(), timeRange.getMaxStamp());
  }

  if (in.isSetBatchSize()) {
    out.setBatch(in.getBatchSize());
  }

  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }

  if (in.isSetAttributes()) {
    addAttributes(out,in.getAttributes());
  }
  
  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }

  if (in.isSetReversed()) {
    out.setReversed(in.isReversed());
  }

  return out;
}
 
Developer: fengchen8086, Project: ditb, Lines: 53, Source: ThriftUtilities.java

Example 14: scannerOpenWithScan

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Override
public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan();
    addAttributes(scan, attributes);
    if (tScan.isSetStartRow()) {
      scan.setStartRow(tScan.getStartRow());
    }
    if (tScan.isSetStopRow()) {
      scan.setStopRow(tScan.getStopRow());
    }
    if (tScan.isSetTimestamp()) {
      scan.setTimeRange(0, tScan.getTimestamp());
    }
    if (tScan.isSetCaching()) {
      scan.setCaching(tScan.getCaching());
    }
    if (tScan.isSetBatchSize()) {
      scan.setBatch(tScan.getBatchSize());
    }
    if (tScan.isSetColumns() && tScan.getColumns().size() != 0) {
      for(ByteBuffer column : tScan.getColumns()) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    if (tScan.isSetFilterString()) {
      ParseFilter parseFilter = new ParseFilter();
      scan.setFilter(
          parseFilter.parseFilterString(tScan.getFilterString()));
    }
    if (tScan.isSetReversed()) {
      scan.setReversed(tScan.isReversed());
    }
    return addScanner(table.getScanner(scan), tScan.sortColumns);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 52, Source: ThriftServerRunner.java

Example 15: getConfiguredScanForJob

import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Optional arguments.
  // Set Scan Versions
  int versions = args.length > 2? Integer.parseInt(args[2]): 1;
  s.setMaxVersions(versions);
  // Set Scan Range
  long startTime = args.length > 3? Long.parseLong(args[3]): 0L;
  long endTime = args.length > 4? Long.parseLong(args[4]): Long.MAX_VALUE;
  s.setTimeRange(startTime, endTime);
  // Set cache blocks
  s.setCacheBlocks(false);
  // set Start and Stop row
  if (conf.get(TableInputFormat.SCAN_ROW_START) != null) {
    s.setStartRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_START)));
  }
  if (conf.get(TableInputFormat.SCAN_ROW_STOP) != null) {
    s.setStopRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_STOP)));
  }
  // Set raw scan (return delete markers and not-yet-collected deleted cells)
  boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
  if (raw) {
    s.setRaw(raw);
  }

  // Set Scan Column Family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or Prefix Filter if applicable.
  Filter exportFilter = getExportFilter(args);
  if (exportFilter != null) {
    LOG.info("Setting Scan Filter for Export.");
    s.setFilter(exportFilter);
  }

  int batching = conf.getInt(EXPORT_BATCHING, -1);
  if (batching !=  -1){
    try {
      s.setBatch(batching);
    } catch (IncompatibleFilterException e) {
      LOG.error("Batching could not be set", e);
    }
  }
  LOG.info("versions=" + versions + ", starttime=" + startTime +
    ", endtime=" + endTime + ", keepDeletedCells=" + raw);
  return s;
}
 
Developer: fengchen8086, Project: ditb, Lines: 48, Source: Export.java


Note: the org.apache.hadoop.hbase.client.Scan.setBatch method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. When redistributing or using the code, please follow the corresponding project's license. Do not reproduce this compilation without permission.