

Java Scan.setTimeRange Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setTimeRange. If you are wondering what Scan.setTimeRange does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Scan.


Thirteen code examples of Scan.setTimeRange are shown below, sorted by popularity by default.
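Before the collected examples, here is a minimal, self-contained sketch of the typical call. It assumes an HBase cluster reachable through the default client configuration; the table name "my_table" and the one-hour window are illustrative only, not taken from any project below. Note that setTimeRange defines a half-open interval: minStamp is inclusive, maxStamp is exclusive.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class TimeRangeScanSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("my_table"))) { // hypothetical table
      long now = System.currentTimeMillis();
      Scan scan = new Scan();
      // Only return cells whose timestamp falls in [now - 1h, now):
      // minStamp is inclusive, maxStamp is exclusive.
      scan.setTimeRange(now - 3600_000L, now);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(result);
        }
      }
    }
  }
}

The same pattern recurs throughout the examples below, usually with the two bounds parsed from job configuration or command-line arguments.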

Example 1: getConfiguredScanForJob

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Set Scan Versions
  s.setMaxVersions(Integer.MAX_VALUE);
  s.setCacheBlocks(false);
  // Set Scan Column Family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or Prefix Filter if applicable.
  Filter rowFilter = getRowFilter(args);
  if (rowFilter != null) {
    LOG.info("Setting Row Filter for counter.");
    s.setFilter(rowFilter);
  }
  // Set TimeRange if defined
  long[] timeRange = getTimeRange(args);
  if (timeRange != null) {
    LOG.info("Setting TimeRange for counter.");
    s.setTimeRange(timeRange[0], timeRange[1]);
  }
  return s;
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: CellCounter.java

Example 2: initScan

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
Scan initScan() throws IOException {
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  if (startTime != 0 || endTime != 0) {
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
  }
  if (scanBatch > 0) {
    scan.setBatch(scanBatch);
  }
  if (versions >= 0) {
    scan.setMaxVersions(versions);
  }
  if (!isTableStartRow(startRow)) {
    scan.setStartRow(startRow);
  }
  if (!isTableEndRow(stopRow)) {
    scan.setStopRow(stopRow);
  }
  if(families != null) {
    for(String fam : families.split(",")) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  return scan;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HashTable.java

Example 3: scannerOpenTs

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
@Override
public int scannerOpenTs(ByteBuffer tableName, ByteBuffer startRow,
    List<ByteBuffer> columns, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes) throws IOError, TException {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startRow));
    addAttributes(scan, attributes);
    scan.setTimeRange(0, timestamp);
    if (columns != null && columns.size() != 0) {
      for (ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: ThriftServerRunner.java

Example 4: scannerOpenWithStopTs

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
@Override
public int scannerOpenWithStopTs(ByteBuffer tableName, ByteBuffer startRow,
    ByteBuffer stopRow, List<ByteBuffer> columns, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError, TException {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startRow), getBytes(stopRow));
    addAttributes(scan, attributes);
    scan.setTimeRange(0, timestamp);
    if (columns != null && columns.size() != 0) {
      for (ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: ThriftServerRunner.java

Example 5: getScanner

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
public ScannerIncommon getScanner(byte [] family, byte [][] qualifiers,
  byte [] firstRow, long ts)
throws IOException {
  Scan scan = new Scan(firstRow);
  if(qualifiers == null || qualifiers.length == 0) {
    scan.addFamily(family);
  } else {
    for (int i = 0; i < qualifiers.length; i++) {
      // add each qualifier under the caller-supplied family
      scan.addColumn(family, qualifiers[i]);
    }
  }
  scan.setTimeRange(0, ts);
  return new InternalScannerIncommon(region.getScanner(scan));
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: HBaseTestCase.java

Example 6: createRowkeyQueueBySecondaryIndex

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
/**
 * Scans all index tables; rowkeys common to every index table are collected in rowkeySet.
 * This can be optimized in two ways:
 * 1. scan the index tables in ascending order of their candidate-rowkey counts, similar to CCIndex
 * 2. scan the index tables in parallel
 *
 * @throws IOException
 */
public static Queue<byte[]> createRowkeyQueueBySecondaryIndex(Connection conn,
    IndexTableRelation relation, Map<byte[], NavigableSet<byte[]>> familyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  TreeSet<byte[]> rowkeySet = null;
  for (ScanRange range : rangeList.getRanges()) {
    Scan scan = new Scan();
    scan.setStartRow(range.getStart());
    scan.setStopRow(range.getStop());
    scan.setFamilyMap(familyMap);
    if (range.getStartTs() != -1 && range.getStopTs() != -1) {
      scan.setTimeRange(range.getStartTs(), range.getStopTs());
    }
    TableName tableName = relation.getIndexTableName(range.getFamily(), range.getQualifier());
    Table table = conn.getTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    Result res;
    TreeSet<byte[]> candidateSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    while ((res = scanner.next()) != null) {
      candidateSet.add(IndexPutParser.parseIndexRowKey(res.getRow())[0]);
    }
    scanner.close(); // release the scanner and table once this index table is drained
    table.close();
    System.out.println("get " + candidateSet.size() + " candidate rowkeys from " + range);
    if (rowkeySet == null) {
      rowkeySet = candidateSet;
    } else {
      rowkeySet = getCommonSet(rowkeySet, candidateSet);
    }
    System.out.println("common key set size " + rowkeySet.size() + " after " + range);
    if (rowkeySet.isEmpty()) { // no common keys at all; the remaining index tables can be skipped
      break;
    }
  }
  if (rowkeySet != null && !rowkeySet.isEmpty()) {
    Queue<byte[]> rowkeyQueue = new LinkedList<>();
    for (byte[] rowkey : rowkeySet)
      rowkeyQueue.add(rowkey);
    return rowkeyQueue;
  }
  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 48, Source: GSScannerCaching.java

Example 7: scanFromThrift

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
public static Scan scanFromThrift(TScan in) throws IOException {
  Scan out = new Scan();

  if (in.isSetStartRow())
    out.setStartRow(in.getStartRow());
  if (in.isSetStopRow())
    out.setStopRow(in.getStopRow());
  if (in.isSetCaching())
    out.setCaching(in.getCaching());
  if (in.isSetMaxVersions()) {
    out.setMaxVersions(in.getMaxVersions());
  }

  if (in.isSetColumns()) {
    for (TColumn column : in.getColumns()) {
      if (column.isSetQualifier()) {
        out.addColumn(column.getFamily(), column.getQualifier());
      } else {
        out.addFamily(column.getFamily());
      }
    }
  }

  TTimeRange timeRange = in.getTimeRange();
  if (timeRange != null &&
      timeRange.isSetMinStamp() && timeRange.isSetMaxStamp()) {
    out.setTimeRange(timeRange.getMinStamp(), timeRange.getMaxStamp());
  }

  if (in.isSetBatchSize()) {
    out.setBatch(in.getBatchSize());
  }

  if (in.isSetFilterString()) {
    ParseFilter parseFilter = new ParseFilter();
    out.setFilter(parseFilter.parseFilterString(in.getFilterString()));
  }

  if (in.isSetAttributes()) {
    addAttributes(out, in.getAttributes());
  }
  
  if (in.isSetAuthorizations()) {
    out.setAuthorizations(new Authorizations(in.getAuthorizations().getLabels()));
  }

  if (in.isSetReversed()) {
    out.setReversed(in.isReversed());
  }

  return out;
}
 
Developer: fengchen8086, Project: ditb, Lines: 53, Source: ThriftUtilities.java

Example 8: scannerOpenWithScan

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
@Override
public int scannerOpenWithScan(ByteBuffer tableName, TScan tScan,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan();
    addAttributes(scan, attributes);
    if (tScan.isSetStartRow()) {
      scan.setStartRow(tScan.getStartRow());
    }
    if (tScan.isSetStopRow()) {
      scan.setStopRow(tScan.getStopRow());
    }
    if (tScan.isSetTimestamp()) {
      scan.setTimeRange(0, tScan.getTimestamp());
    }
    if (tScan.isSetCaching()) {
      scan.setCaching(tScan.getCaching());
    }
    if (tScan.isSetBatchSize()) {
      scan.setBatch(tScan.getBatchSize());
    }
    if (tScan.isSetColumns() && tScan.getColumns().size() != 0) {
      for(ByteBuffer column : tScan.getColumns()) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    if (tScan.isSetFilterString()) {
      ParseFilter parseFilter = new ParseFilter();
      scan.setFilter(
          parseFilter.parseFilterString(tScan.getFilterString()));
    }
    if (tScan.isSetReversed()) {
      scan.setReversed(tScan.isReversed());
    }
    return addScanner(table.getScanner(scan), tScan.sortColumns);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 52, Source: ThriftServerRunner.java

Example 9: map

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
/**
 * Map method that compares every scanned row with the equivalent from
 * a distant cluster.
 * @param row  The current table row key.
 * @param value  The columns.
 * @param context  The current context.
 * @throws IOException When something is broken with the data.
 */
@Override
public void map(ImmutableBytesWritable row, final Result value,
                Context context)
    throws IOException {
  if (replicatedScanner == null) {
    final Configuration conf = context.getConfiguration();
    final Scan scan = new Scan();
    scan.setCaching(conf.getInt(TableInputFormat.SCAN_CACHEDROWS, 1));
    long startTime = conf.getLong(NAME + ".startTime", 0);
    long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE);
    String families = conf.get(NAME + ".families", null);
    if(families != null) {
      String[] fams = families.split(",");
      for(String fam : fams) {
        scan.addFamily(Bytes.toBytes(fam));
      }
    }
    scan.setTimeRange(startTime, endTime);
    int versions = conf.getInt(NAME+".versions", -1);
    LOG.info("Setting number of version inside map as: " + versions);
    if (versions >= 0) {
      scan.setMaxVersions(versions);
    }

    final TableSplit tableSplit = (TableSplit)(context.getInputSplit());
    HConnectionManager.execute(new HConnectable<Void>(conf) {
      @Override
      public Void connect(HConnection conn) throws IOException {
        String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
        Configuration peerConf = HBaseConfiguration.createClusterConf(conf,
            zkClusterKey, PEER_CONFIG_PREFIX);

        TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
        replicatedTable = new HTable(peerConf, tableName);
        scan.setStartRow(value.getRow());
        scan.setStopRow(tableSplit.getEndRow());
        replicatedScanner = replicatedTable.getScanner(scan);
        return null;
      }
    });
    currentCompareRowInPeerTable = replicatedScanner.next();
  }
  while (true) {
    if (currentCompareRowInPeerTable == null) {
      // reach the region end of peer table, row only in source table
      logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value);
      break;
    }
    int rowCmpRet = Bytes.compareTo(value.getRow(), currentCompareRowInPeerTable.getRow());
    if (rowCmpRet == 0) {
      // rowkey is same, need to compare the content of the row
      try {
        Result.compareResults(value, currentCompareRowInPeerTable);
        context.getCounter(Counters.GOODROWS).increment(1);
      } catch (Exception e) {
        logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value);
        LOG.error("Exception while comparing row : " + e);
      }
      currentCompareRowInPeerTable = replicatedScanner.next();
      break;
    } else if (rowCmpRet < 0) {
      // row only exists in source table
      logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_SOURCE_TABLE_ROWS, value);
      break;
    } else {
      // row only exists in peer table
      logFailRowAndIncreaseCounter(context, Counters.ONLY_IN_PEER_TABLE_ROWS,
        currentCompareRowInPeerTable);
      currentCompareRowInPeerTable = replicatedScanner.next();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 81, Source: VerifyReplication.java

Example 10: createSubmittableJob

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
/**
 * Sets up the actual job.
 *
 * @param conf  The current configuration.
 * @param args  The command line parameters.
 * @return The newly created job.
 * @throws java.io.IOException When setting up the job fails.
 */
public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException {
  if (!doCommandLine(args)) {
    return null;
  }
  if (!conf.getBoolean(HConstants.REPLICATION_ENABLE_KEY,
      HConstants.REPLICATION_ENABLE_DEFAULT)) {
    throw new IOException("Replication needs to be enabled to verify it.");
  }
  conf.set(NAME+".peerId", peerId);
  conf.set(NAME+".tableName", tableName);
  conf.setLong(NAME+".startTime", startTime);
  conf.setLong(NAME+".endTime", endTime);
  if (families != null) {
    conf.set(NAME+".families", families);
  }

  Pair<ReplicationPeerConfig, Configuration> peerConfigPair = getPeerQuorumConfig(conf);
  ReplicationPeerConfig peerConfig = peerConfigPair.getFirst();
  String peerQuorumAddress = peerConfig.getClusterKey();
  LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " +
      peerConfig.getConfiguration());
  conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
  HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX,
      peerConfig.getConfiguration().entrySet());

  conf.setInt(NAME + ".versions", versions);
  LOG.info("Number of version: " + versions);

  Job job = new Job(conf, NAME + "_" + tableName);
  job.setJarByClass(VerifyReplication.class);

  Scan scan = new Scan();
  scan.setTimeRange(startTime, endTime);
  if (versions >= 0) {
    scan.setMaxVersions(versions);
    LOG.info("Number of versions set to " + versions);
  }
  if(families != null) {
    String[] fams = families.split(",");
    for(String fam : fams) {
      scan.addFamily(Bytes.toBytes(fam));
    }
  }
  TableMapReduceUtil.initTableMapperJob(tableName, scan,
      Verifier.class, null, null, job);

  Configuration peerClusterConf = peerConfigPair.getSecond();
  // Obtain the auth token from peer cluster
  TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);

  job.setOutputFormatClass(NullOutputFormat.class);
  job.setNumReduceTasks(0);
  return job;
}
 
Developer: fengchen8086, Project: ditb, Lines: 64, Source: VerifyReplication.java

Example 11: getConfiguredScanForJob

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
private static Scan getConfiguredScanForJob(Configuration conf, String[] args) throws IOException {
  Scan s = new Scan();
  // Optional arguments.
  // Set Scan Versions
  int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1;
  s.setMaxVersions(versions);
  // Set Scan Range
  long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L;
  long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE;
  s.setTimeRange(startTime, endTime);
  // Set cache blocks
  s.setCacheBlocks(false);
  // set Start and Stop row
  if (conf.get(TableInputFormat.SCAN_ROW_START) != null) {
    s.setStartRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_START)));
  }
  if (conf.get(TableInputFormat.SCAN_ROW_STOP) != null) {
    s.setStopRow(Bytes.toBytes(conf.get(TableInputFormat.SCAN_ROW_STOP)));
  }
  // Set raw scan (keep delete markers and deleted cells)
  boolean raw = Boolean.parseBoolean(conf.get(RAW_SCAN));
  if (raw) {
    s.setRaw(raw);
  }
  // Set Scan Column Family
  if (conf.get(TableInputFormat.SCAN_COLUMN_FAMILY) != null) {
    s.addFamily(Bytes.toBytes(conf.get(TableInputFormat.SCAN_COLUMN_FAMILY)));
  }
  // Set RowFilter or Prefix Filter if applicable.
  Filter exportFilter = getExportFilter(args);
  if (exportFilter != null) {
    LOG.info("Setting Scan Filter for Export.");
    s.setFilter(exportFilter);
  }

  int batching = conf.getInt(EXPORT_BATCHING, -1);
  if (batching != -1) {
    try {
      s.setBatch(batching);
    } catch (IncompatibleFilterException e) {
      LOG.error("Batching could not be set", e);
    }
  }
  LOG.info("versions=" + versions + ", starttime=" + startTime +
    ", endtime=" + endTime + ", keepDeletedCells=" + raw);
  return s;
}
 
Developer: fengchen8086, Project: ditb, Lines: 48, Source: Export.java

Example 12: testWithoutKeepingDeletes

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
/**
 * basic verification of existing behavior
 */
@Test
public void testWithoutKeepingDeletes() throws Exception {
  // KEEP_DELETED_CELLS is NOT enabled
  HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.FALSE);
  HRegion region = hbu.createLocalHRegion(htd, null, null);

  long ts = EnvironmentEdgeManager.currentTime();
  Put p = new Put(T1, ts);
  p.add(c0, c0, T1);
  region.put(p);

  Get gOne = new Get(T1);
  gOne.setMaxVersions();
  gOne.setTimeRange(0L, ts + 1);
  Result rOne = region.get(gOne);
  assertFalse(rOne.isEmpty());


  Delete d = new Delete(T1, ts+2);
  d.deleteColumn(c0, c0, ts);
  region.delete(d);

  // "past" get does not see rows behind delete marker
  Get g = new Get(T1);
  g.setMaxVersions();
  g.setTimeRange(0L, ts+1);
  Result r = region.get(g);
  assertTrue(r.isEmpty());

  // "past" scan does not see rows behind delete marker
  Scan s = new Scan();
  s.setMaxVersions();
  s.setTimeRange(0L, ts+1);
  InternalScanner scanner = region.getScanner(s);
  List<Cell> kvs = new ArrayList<Cell>();
  while (scanner.next(kvs))
    ;
  assertTrue(kvs.isEmpty());

  // flushing and minor compaction keep delete markers
  region.flush(true);
  region.compact(false);
  assertEquals(1, countDeleteMarkers(region));
  region.compact(true);
  // major compaction deleted it
  assertEquals(0, countDeleteMarkers(region));

  HRegion.closeHRegion(region);
}
 
Developer: fengchen8086, Project: ditb, Lines: 54, Source: TestKeepDeletes.java

Example 13: createRowkeyQueueBySecondaryIndex

import org.apache.hadoop.hbase.client.Scan; // import the class the method depends on
/**
 * Scans all index tables; rowkeys common to every index table are collected in rowkeySet.
 * This can be optimized in two ways:
 * 1. scan the index tables in ascending order of their candidate-rowkey counts, similar to CCIndex
 * 2. scan the index tables in parallel
 *
 * @throws IOException
 */
public static Queue<byte[]> createRowkeyQueueBySecondaryIndex(Connection conn,
    IndexTableRelation relation, Map<byte[], NavigableSet<byte[]>> familyMap,
    ScanRange.ScanRangeList rangeList, Scan rawScan) throws IOException {
  TreeSet<byte[]> rowkeySet = null;
  long timeToMerge = 0;
  for (ScanRange range : rangeList.getRanges()) {
    Scan scan = new Scan();
    scan.setStartRow(range.getStart());
    scan.setStopRow(range.getStop());
    scan.setFamilyMap(familyMap);
    scan.setCaching(rawScan.getCaching());
    scan.setCacheBlocks(rawScan.getCacheBlocks());
    scan.setId(rawScan.getId());
    if (range.getStartTs() != -1 && range.getStopTs() != -1) {
      scan.setTimeRange(range.getStartTs(), range.getStopTs());
    }
    TableName tableName = relation.getIndexTableName(range.getFamily(), range.getQualifier());
    Table table = conn.getTable(tableName);
    ResultScanner scanner = table.getScanner(scan);
    Result res;
    long timeStart = System.currentTimeMillis();
    TreeSet<byte[]> candidateSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
    while ((res = scanner.next()) != null) {
      candidateSet.add(IndexPutParser.parseIndexRowKey(res.getRow())[0]);
    }
    scanner.close(); // release the scanner and table once this index table is drained
    table.close();
    System.out.println(String
        .format("get %d candidate rowkeys from %s in scan %s, cost %.2f seconds",
            candidateSet.size(), range.toString(), scan.getId(),
            (System.currentTimeMillis() - timeStart) / 1000.0));
    if (rowkeySet == null) {
      rowkeySet = candidateSet;
    } else {
      timeStart = System.currentTimeMillis();
      rowkeySet = getCommonSet(rowkeySet, candidateSet);
      timeToMerge += (System.currentTimeMillis() - timeStart);
    }
    System.out.println(
        "common key set size " + rowkeySet.size() + " after " + range + " in scan " + scan
            .getId());
    if (rowkeySet.isEmpty()) { // no common keys at all; the remaining index tables can be skipped
      break;
    }
  }
  if (rowkeySet != null) {
    System.out.println(String
        .format("get %d result rowkeys in scan %s, merging cost %.2f seconds", rowkeySet.size(),
            rawScan.getId(), timeToMerge / 1000.0));
  }
  if (rowkeySet != null && !rowkeySet.isEmpty()) {
    Queue<byte[]> rowkeyQueue = new LinkedList<>();
    for (byte[] rowkey : rowkeySet)
      rowkeyQueue.add(rowkey);
    return rowkeyQueue;
  }
  return null;
}
 
Developer: fengchen8086, Project: ditb, Lines: 63, Source: GSScanner.java


Note: The org.apache.hadoop.hbase.client.Scan.setTimeRange examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects, and copyright remains with their original authors; consult the corresponding project's license before distributing or using the code, and do not reproduce it without permission.