

Java Scan.setBatch Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setBatch. If you are wondering what Scan.setBatch does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Scan.


The following presents 15 code examples of Scan.setBatch, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
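Before the examples, a quick orientation on the method itself: Scan.setBatch(int) caps the number of cells (KeyValues) packed into each Result, which keeps very wide rows from overwhelming client memory, while Scan.setCaching(int) controls how many Results are fetched per RPC. The minimal sketch below shows the typical combination; the connection setup and the table name "demo_table" are illustrative assumptions, not taken from the examples on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanBatchSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("demo_table"))) { // hypothetical table
            Scan scan = new Scan();
            scan.setCaching(100); // up to 100 Results fetched per RPC round-trip
            scan.setBatch(5);     // at most 5 cells of a row per Result
            try (ResultScanner scanner = table.getScanner(scan)) {
                for (Result result : scanner) {
                    // a row wider than 5 cells arrives as several consecutive Results
                    System.out.println(result.rawCells().length + " cells in this Result");
                }
            }
        }
    }
}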

Example 1: rowFilter

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
/**
 * Uses a row filter to select rows whose key is greater than the given row key.
 *
 * @param tableName table name
 * @param rowKey    row key
 * @param count     number of rows to fetch
 */
public void rowFilter(String tableName, String rowKey, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    //Row filter: select rows ordered at or after the comparator's value
    //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowKey))));//exact binary row key
    //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));//regular expression
    //scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));//substring match
    scan.setFilter(new RowFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("row".getBytes())));//binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 2: familyFilter

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
/**
 * Column family filter.
 *
 * @param tableName table name
 * @param rowFamily column family
 * @param count     number of rows to fetch
 */
public void familyFilter(String tableName, String rowFamily, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    //Family filter: compares against column family names
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER, new BinaryComparator(Bytes.toBytes(rowFamily))));//exact binary family name
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new RegexStringComparator("row.*")));//regular expression
    //scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new SubstringComparator("row")));//substring match
    scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL, new BinaryPrefixComparator("mm".getBytes())));//binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 3: qualifierFilter

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
/**
 * Column qualifier filter.
 *
 * @param tableName  table name
 * @param columnName column qualifier
 * @param count      number of rows to fetch
 */
public void qualifierFilter(String tableName, String columnName, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    //Qualifier filter: compares against column qualifiers (the original comment said "family filter", which was a copy-paste slip)
    scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnName))));//exact binary qualifier
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));//regular expression
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));//substring match
    //scan.setFilter(new QualifierFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("m".getBytes())));//binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 4: valueFilter

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
/**
 * Value filter (the original Javadoc said "column qualifier filter", which was a copy-paste slip).
 *
 * @param tableName   table name
 * @param columnValue cell value
 * @param count       number of rows to fetch
 */
public void valueFilter(String tableName, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    //Value filter: compares against cell values
    scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnValue))));//exact binary value
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator("row.*")));//regular expression
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new SubstringComparator("row")));//substring match
    //scan.setFilter(new ValueFilter(CompareFilter.CompareOp.EQUAL, new BinaryPrefixComparator("mm".getBytes())));//binary prefix match
    scan.setCaching(10);
    scan.setBatch(10);
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines: 27, Source: HBaseFilterOperation.java

Example 5: dependentColumnFilter

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
/**
 * Dependent column filter (returns cells that share a timestamp with the reference column).
 *
 * @param tableName    table name
 * @param columnFamily column family of the reference column
 * @param qualifier    qualifier of the reference column
 * @param columnValue  value of the reference column
 * @param count        number of rows to fetch
 */
public void dependentColumnFilter(String tableName, String columnFamily, String qualifier, String columnValue, int count) {
    HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
    Table table = hBaseConfiguration.table(tableName);
    Scan scan = new Scan();
    //The original snippet set a placeholder PrefixFilter(Bytes.toBytes("")) here, which ignores every parameter;
    //a DependentColumnFilter matches the method's name and Javadoc:
    scan.setFilter(new DependentColumnFilter(Bytes.toBytes(columnFamily), Bytes.toBytes(qualifier), false,
            CompareFilter.CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes(columnValue))));
    scan.setCaching(10);
    //Note: no setBatch here. DependentColumnFilter filters whole rows (hasFilterRow() == true),
    //and calling setBatch on such a scan throws IncompatibleFilterException (see Example 9).
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result[] results = scanner.next(count);
        HBaseResultUtil.print(results);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
 
Developer ID: mumuhadoop, Project: mumu-hbase, Lines: 25, Source: HBaseFilterOperation.java

Example 6: initScan

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
Scan initScan() throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  if (startTime != 0 || endTime != 0) {
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  }
  if (scanBatch > 0) {
    scan.setBatch(scanBatch);
  }
  if (versions >= 0) {
    scan.setMaxVersions(versions);
  }
  if (!isTableStartRow(startRow)) {
    scan.setStartRow(startRow);
  }
  if (!isTableEndRow(stopRow)) {
    scan.setStopRow(stopRow);
  }
  if(families != null) {
    for(String fam : families.split(",")) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  return scan;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: HashTable.java

Example 7: doAction

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);

    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: IntegrationTestMTTR.java

Example 8: findStartNode

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
private static CINode findStartNode(Table table, byte[] startKey) throws IOException {
  Scan scan = new Scan();
  scan.setStartRow(startKey);
  scan.setBatch(1);
  scan.addColumn(FAMILY_NAME, COLUMN_PREV);

  long t1 = System.currentTimeMillis();
  ResultScanner scanner = table.getScanner(scan);
  Result result = scanner.next();
  long t2 = System.currentTimeMillis();
  scanner.close();

  if (result != null) {
    CINode node = getCINode(result, new CINode());
    System.out.printf("FSR %d %s\n", t2 - t1, Bytes.toStringBinary(node.key));
    return node;
  }

  System.out.println("FSR " + (t2 - t1));

  return null;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: IntegrationTestBigLinkedList.java

Example 9: testScanWithLimit

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
@Test
public void testScanWithLimit() {
  int kv_number = 0;
  try {
    Scan scan = new Scan();
    // set batch number as 2, which means each Result should contain 2 KVs at most
    scan.setBatch(2);
    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        Bytes.toBytes(columnFamily), Bytes.toBytes("c5"),
        CompareFilter.CompareOp.EQUAL, new SubstringComparator("2_c5"));

    // add filter after batch defined
    scan.setFilter(filter);
    Table table = openTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    // Expect to get following row
    // row2 => <f1:c1, 2_c1>, <f1:c2, 2_c2>,
    // row2 => <f1:c3, 2_c3>, <f1:c4, 2_c4>,
    // row2 => <f1:c5, 2_c5>

    for (Result result : scanner) {
      for (Cell kv : result.listCells()) {
        kv_number++;
        LOG.debug(kv_number + ". kv: " + kv);
      }
    }

    scanner.close();
    table.close();
  } catch (Exception e) {
    // no correct result is expected
    assertNotNull("No IncompatibleFilterException caught", e);
  }
  LOG.debug("check the fetched kv number");
  assertEquals("We should not get result(s) returned.", 0, kv_number);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 37, Source: TestFilterWithScanLimits.java
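A note on why Example 9 ends with zero cells: Scan.setBatch refuses to combine with any filter whose hasFilterRow() returns true, because such a filter must see a complete row to decide. If the filter is installed first, setBatch fails fast with IncompatibleFilterException; in the test above the batch is set first, so the failure only surfaces once the scan executes. (Example 7, by contrast, pairs setBatch with KeyOnlyFilter, which is safe since KeyOnlyFilter does not filter whole rows.) A minimal sketch of the fail-fast ordering, with hypothetical family and qualifier names:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.filter.SubstringComparator;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchFilterOrdering {
    public static void main(String[] args) {
        Scan scan = new Scan();
        // SingleColumnValueFilter must see whole rows: hasFilterRow() == true
        scan.setFilter(new SingleColumnValueFilter(
                Bytes.toBytes("f1"), Bytes.toBytes("c5"),   // hypothetical family and qualifier
                CompareFilter.CompareOp.EQUAL, new SubstringComparator("2_c5")));
        try {
            scan.setBatch(2); // rejected: batching would hand the filter partial rows
        } catch (IncompatibleFilterException e) { // unchecked, thrown by setBatch itself
            System.out.println("setBatch rejected: " + e.getMessage());
        }
    }
}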

Example 10: testPartialResultsAndBatch

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
public void testPartialResultsAndBatch(final int batch, final int cellsPerPartialResult)
    throws Exception {
  if (LOG.isInfoEnabled()) {
    LOG.info("batch: " + batch + " cellsPerPartialResult: " + cellsPerPartialResult);
  }

  Scan scan = new Scan();
  scan.setMaxResultSize(getResultSizeForNumberOfCells(cellsPerPartialResult));
  scan.setBatch(batch);
  ResultScanner scanner = TABLE.getScanner(scan);
  Result result; // no pre-fetch: the loop below consumes every Result, including the first
  int repCount = 0;

  while ((result = scanner.next()) != null) {
    assertTrue(result.rawCells() != null);

    if (result.isPartial()) {
      final String error =
          "Cells:" + result.rawCells().length + " Batch size:" + batch
              + " cellsPerPartialResult:" + cellsPerPartialResult + " rep:" + repCount;
      assertTrue(error, result.rawCells().length <= Math.min(batch, cellsPerPartialResult));
    } else {
      assertTrue(result.rawCells().length <= batch);
    }
    repCount++;
  }

  scanner.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: TestPartialResultsFromClientSide.java
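The assertions above rest on how batch interacts with size-limited RPCs: each partial Result carries at most min(batch, cellsPerPartialResult) cells, and Result.isPartial() flags the fragments. Below is a minimal sketch of opting in to partial results on the client side (setAllowPartialResults and isPartial are HBase 1.1+ APIs); the 2 KB size cap is an illustrative value and the table is assumed to be already open:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class PartialResultSketch {
    // Prints row fragments as they stream in; 'table' is assumed to be an open Table
    static void scanWithPartials(Table table) throws IOException {
        Scan scan = new Scan();
        scan.setAllowPartialResults(true); // accept Results covering only part of a row
        scan.setMaxResultSize(2048);       // illustrative ~2 KB cap on data per RPC
        scan.setBatch(5);                  // and at most 5 cells per Result
        try (ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                System.out.println("cells=" + result.rawCells().length
                        + " partial=" + result.isPartial());
            }
        }
    }
}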

Example 11: testBatchingResultWhenRegionMove

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
@Test
public void testBatchingResultWhenRegionMove() throws IOException {
  Table table =
      createTestTable(TableName.valueOf("testBatchingResultWhenRegionMove"), ROWS, FAMILIES,
          QUALIFIERS, VALUE);

  moveRegion(table, 1);

  Scan scan = new Scan();
  scan.setCaching(1);
  scan.setBatch(1);

  ResultScanner scanner = table.getScanner(scan);
  for (int i = 0; i < NUM_FAMILIES * NUM_QUALIFIERS - 1; i++) {
    scanner.next();
  }
  Result result1 = scanner.next();
  assertEquals(1, result1.rawCells().length);
  Cell c1 = result1.rawCells()[0];
  assertCell(c1, ROWS[0], FAMILIES[NUM_FAMILIES - 1], QUALIFIERS[NUM_QUALIFIERS - 1]);

  moveRegion(table, 2);

  Result result2 = scanner.next();
  assertEquals(1, result2.rawCells().length);
  Cell c2 = result2.rawCells()[0];
  assertCell(c2, ROWS[1], FAMILIES[0], QUALIFIERS[0]);

  moveRegion(table, 3);

  Result result3 = scanner.next();
  assertEquals(1, result3.rawCells().length);
  Cell c3 = result3.rawCells()[0];
  assertCell(c3, ROWS[1], FAMILIES[0], QUALIFIERS[1]);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 36, Source: TestPartialResultsFromClientSide.java

Example 12: runCheck

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
/**
 * After adding data to the table, start a MapReduce job to check the linked chains.
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void runCheck() throws IOException, ClassNotFoundException, InterruptedException {
  LOG.info("Running check");
  Configuration conf = getConf();
  String jobName = getTablename() + "_check" + EnvironmentEdgeManager.currentTime();
  Path p = util.getDataTestDirOnTestFS(jobName);

  Job job = new Job(conf);
  job.setJarByClass(getClass());
  job.setJobName(jobName);

  job.setPartitionerClass(NaturalKeyPartitioner.class);
  job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);
  job.setSortComparatorClass(CompositeKeyComparator.class);

  Scan scan = new Scan();
  scan.addFamily(CHAIN_FAM);
  scan.addFamily(SORT_FAM);
  scan.setMaxVersions(1);
  scan.setCacheBlocks(false);
  scan.setBatch(1000);

  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount != NUM_REPLICA_COUNT_DEFAULT) {
    scan.setConsistency(Consistency.TIMELINE);
  }

  TableMapReduceUtil.initTableMapperJob(
      getTablename().getName(),
      scan,
      LinkedListCheckingMapper.class,
      LinkKey.class,
      LinkChain.class,
      job
  );

  job.setReducerClass(LinkedListCheckingReducer.class);
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);

  FileOutputFormat.setOutputPath(job, p);

  assertEquals(true, job.waitForCompletion(true));

  // Delete the files.
  util.getTestFileSystem().delete(p, true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 53, Source: IntegrationTestBulkLoad.java

Example 13: scanFromThrift

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
public static Scan scanFromThrift(TScan in) throws IOException {
  Scan out = new Scan();

  if (in.isSetStartRow())
    out.setStartRow(in.getStartRow());
  if (in.isSetStopRow())
    out.setStopRow(in.getStopRow());
  if (in.isSetCaching())
    out.setCaching(in.getCaching());
  if (in.isSetMaxVersions()) {
    out.setMaxVersions(in.getMaxVersions());
  }

  if (in.isSetColumns()) {
    for (TColumn column : in.getColumns()) {
      if (column.isSetQualifier()) {
        out.addColumn(column.getFamily(), column.getQualifier());
      } else {
        out.addFamily(column.getFamily());
      }
    }
  }

  TTimeRange timeRange = in.getTimeRange();
  if (timeRange != null &&
      timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
    out.setTimeRange(timeRange.getMinStamp(), timeRange.getMaxStamp());
  }

  if (in.isSetBatchSize()) {
    out.setBatch(in.getBatchSize());
  }

  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }

  if (in.isSetAttributes()) {
    addAttributes(out,in.getAttributes());
  }
  
  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }

  if (in.isSetReversed()) {
    out.setReversed(in.isReversed());
  }

  return out;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 53, Source: ThriftUtilities.java

Example 14: scannerOpenWithScan

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
@Override
public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan();
    addAttributes(scan, attributes);
    if (tScan.isSetStartRow()) {
      scan.setStartRow(tScan.getStartRow());
    }
    if (tScan.isSetStopRow()) {
      scan.setStopRow(tScan.getStopRow());
    }
    if (tScan.isSetTimestamp()) {
      scan.setTimeRange(0, tScan.getTimestamp());
    }
    if (tScan.isSetCaching()) {
      scan.setCaching(tScan.getCaching());
    }
    if (tScan.isSetBatchSize()) {
      scan.setBatch(tScan.getBatchSize());
    }
    if (tScan.isSetColumns() && tScan.getColumns().size() != 0) {
      for(ByteBuffer column : tScan.getColumns()) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    if (tScan.isSetFilterString()) {
      ParseFilter parseFilter = new ParseFilter();
      scan.setFilter(
          parseFilter.parseFilterString(tScan.getFilterString()));
    }
    if (tScan.isSetReversed()) {
      scan.setReversed(tScan.isReversed());
    }
    return addScanner(table.getScanner(scan), tScan.sortColumns);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 52, Source: ThriftServerRunner.java

Example 15: getConfiguredScanForJob

import org.apache.hadoop.hbase.client.Scan; //import the required package/class
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Optional arguments.
  // Set Scan Versions
  int versions = args.length > 2? Integer.parseInt(args[2]): 1;
  s.setMaxVersions(versions);
  // Set Scan Range
  long startTime = args.length > 3? Long.parseLong(args[3]): 0L;
  long endTime = args.length > 4? Long.parseLong(args[4]): Long.MAX_VALUE;
  s.setTimeRange(startTime, endTime);
  // Set cache blocks
  s.setCacheBlocks(false);
  // set Start and Stop row
  if (conf.get(TableInputFormat.SCAN_ROW_START) != null) {
    s.setStartRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_START)));
  }
  if (conf.get(TableInputFormat.SCAN_ROW_STOP) != null) {
    s.setStopRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_STOP)));
  }
  // Set Scan Column Family
  boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
  if (raw) {
    s.setRaw(raw);
  }
  
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or Prefix Filter if applicable.
  Filter exportFilter = getExportFilter(args);
  if (exportFilter != null) {
    LOG.info("Setting Scan Filter for Export.");
    s.setFilter(exportFilter);
  }

  int batching = conf.getInt(EXPORT_BATCHING, -1);
  if (batching != -1) {
    try {
      s.setBatch(batching);
    } catch (IncompatibleFilterException e) {
      LOG.error("Batching could not be set", e);
    }
  }
  LOG.info("versions=" + versions + ", starttime=" + startTime +
    ", endtime=" + endTime + ", keepDeletedCells=" + raw);
  return s;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 48, Source: Export.java


Note: The org.apache.hadoop.hbase.client.Scan.setBatch method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's License. Please do not reproduce without permission.