

Java WhileMatchFilter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.WhileMatchFilter. If you are unsure what WhileMatchFilter does or how to use it, the curated class examples below should help.


The WhileMatchFilter class belongs to the org.apache.hadoop.hbase.filter package; 15 code examples of the class are shown below, ordered by popularity. WhileMatchFilter wraps another filter and aborts the scan as soon as the wrapped filter rejects a row, which makes it the standard way to terminate prefix- or page-limited scans early instead of scanning on to the end of the table. A minimal sketch of this pattern follows; the project examples come after it.
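The sketch below shows the most common pattern from the examples in this article: a scan that starts at a row-key prefix and wraps a PrefixFilter in a WhileMatchFilter so it stops at the first non-matching row. The table name demo_table, the row-key prefix, and the default HBaseConfiguration are placeholder assumptions for illustration only, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.WhileMatchFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class WhileMatchFilterExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    byte[] prefix = Bytes.toBytes("user1!"); // hypothetical row-key prefix
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo_table"))) {
      Scan scan = new Scan(prefix); // begin at the first row with the prefix
      // A bare PrefixFilter would keep scanning to the end of the table after
      // the prefix stops matching; WhileMatchFilter aborts the scan at the
      // first non-matching row instead.
      scan.setFilter(new WhileMatchFilter(new PrefixFilter(prefix)));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}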

Example 1: testRow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.setCaching(opts.caching);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: PerformanceEvaluation.java

Example 2: testRow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  FilterList list = new FilterList();
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
 
Developer: grokcoder, Project: pbase, Lines: 17, Source: PerformanceEvaluation.java

Example 3: testRow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows))
      .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
      .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
 
Developer: apache, Project: hbase, Lines: 23, Source: PerformanceEvaluation.java

Example 4: scannerOpenWithPrefix

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
public int scannerOpenWithPrefix(ByteBuffer tableName,
                                 ByteBuffer startAndPrefix,
                                 List<ByteBuffer> columns)
    throws IOError, TException {
  try {
    HTable table = getTable(tableName);
    Scan scan = new Scan(getBytes(startAndPrefix));
    Filter f = new WhileMatchFilter(
        new PrefixFilter(getBytes(startAndPrefix)));
    scan.setFilter(f);
    if(columns != null && columns.size() != 0) {
      for(ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan));
  } catch (IOException e) {
    throw new IOError(e.getMessage());
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 27, Source: ThriftServer.java

Example 5: getFlow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
/**
 * Returns the {@link Flow} instance matching the application ID and run ID.
 *
 * @param cluster the cluster identifier
 * @param user the user running the jobs
 * @param appId the application description
 * @param runId the specific run ID for the flow
 * @param populateTasks whether or not to populate the task details for each
 *          job
 * @return the matching {@link Flow} instance, or {@code null} if none is found
 */
public Flow getFlow(String cluster, String user, String appId, long runId,
    boolean populateTasks) throws IOException {
  Flow flow = null;

  byte[] startRow = ByteUtil.join(Constants.SEP_BYTES, Bytes.toBytes(cluster),
      Bytes.toBytes(user), Bytes.toBytes(appId),
      Bytes.toBytes(FlowKey.encodeRunId(runId)), Constants.EMPTY_BYTES);

  LOG.info(
      "Reading job_history rows start at " + Bytes.toStringBinary(startRow));
  Scan scan = new Scan();
  // start scanning history at cluster!user!app!run!
  scan.setStartRow(startRow);
  // require that all results match this flow prefix
  scan.setFilter(new WhileMatchFilter(new PrefixFilter(startRow)));

  List<Flow> flows = createFromResults(scan, populateTasks, 1);
  if (flows.size() > 0) {
    flow = flows.get(0);
  }

  return flow;
}
 
Developer: twitter, Project: hraven, Lines: 35, Source: JobHistoryService.java

Example 6: getFlowByJobID

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
/**
 * Returns the {@link Flow} instance containing the given job ID.
 *
 * @param cluster the cluster identifier
 * @param jobId the job identifier
 * @param populateTasks whether or not to populate the task details for each job
 * @return the matching {@link Flow} instance, or {@code null} if none is found
 */
public Flow getFlowByJobID(String cluster, String jobId,
    boolean populateTasks) throws IOException {
  Flow flow = null;
  JobKey key = idService.getJobKeyById(new QualifiedJobId(cluster, jobId));
  if (key != null) {
    byte[] startRow =
        ByteUtil.join(Constants.SEP_BYTES, Bytes.toBytes(key.getCluster()),
            Bytes.toBytes(key.getUserName()), Bytes.toBytes(key.getAppId()),
            Bytes.toBytes(key.getEncodedRunId()), Constants.EMPTY_BYTES);

    LOG.info("Reading job_history rows start at "
        + Bytes.toStringBinary(startRow));
    Scan scan = new Scan();
    // start scanning history at cluster!user!app!run!
    scan.setStartRow(startRow);
    // require that all results match this flow prefix
    scan.setFilter(new WhileMatchFilter(new PrefixFilter(startRow)));

    List<Flow> flows = createFromResults(scan, populateTasks, 1);
    if (flows.size() > 0) {
      flow = flows.get(0);
    }
  }
  return flow;
}
 
Developer: twitter, Project: hraven, Lines: 33, Source: JobHistoryService.java

Example 7: createFlowScan

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
/**
 * Creates a scan for flow data.
 * @param rowPrefix - start row prefix
 * @param limit - limit on scanned results
 * @param version - version to match
 * @return Scan
 */
private Scan createFlowScan(byte[] rowPrefix, int limit, String version) {
  Scan scan = new Scan();
  scan.setStartRow(rowPrefix);

  // using a large scanner caching value with a small limit can mean we scan a
  // lot more data than necessary, so lower the caching for low limits
  scan.setCaching(Math.min(limit, defaultScannerCaching));
  // require that all rows match the prefix we're looking for
  Filter prefixFilter = new WhileMatchFilter(new PrefixFilter(rowPrefix));
  // if version is passed, restrict the rows returned to that version
  if (version != null && version.length() > 0) {
    FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
    filters.addFilter(prefixFilter);
    filters.addFilter(new SingleColumnValueFilter(Constants.INFO_FAM_BYTES,
        Constants.VERSION_COLUMN_BYTES, CompareFilter.CompareOp.EQUAL,
        Bytes.toBytes(version)));
    scan.setFilter(filters);
  } else {
    scan.setFilter(prefixFilter);
  }
  return scan;
}
 
Developer: twitter, Project: hraven, Lines: 30, Source: JobHistoryService.java

Example 8: scannerOpenWithPrefix

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
public int scannerOpenWithPrefix(ByteBuffer tableName,
                                 ByteBuffer startAndPrefix,
                                 List<ByteBuffer> columns,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError, TException {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startAndPrefix));
    addAttributes(scan, attributes);
    Filter f = new WhileMatchFilter(
        new PrefixFilter(getBytes(startAndPrefix)));
    scan.setFilter(f);
    if (columns != null && columns.size() != 0) {
      for(ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally{
    closeTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: ThriftServerRunner.java

Example 9: testRow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner s = this.table.getScanner(scan);
  // note: this variant only opens and closes the scanner; it never reads results
  s.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: PerformanceEvaluation.java

Example 10: scannerOpenWithPrefix

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
public int scannerOpenWithPrefix(ByteBuffer tableName,
                                 ByteBuffer startAndPrefix,
                                 List<ByteBuffer> columns,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError, TException {
  try {
    HTable table = getTable(tableName);
    Scan scan = new Scan(getBytes(startAndPrefix));
    addAttributes(scan, attributes);
    Filter f = new WhileMatchFilter(
        new PrefixFilter(getBytes(startAndPrefix)));
    scan.setFilter(f);
    if (columns != null && columns.size() != 0) {
      for(ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(e.getMessage());
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 30, Source: ThriftServerRunner.java

Example 11: testRow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) ; // drain the scanner, discarding results
  s.close();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 10, Source: PerformanceEvaluation.java

Example 12: testRow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    // drain the scanner; results are intentionally discarded
  }
  s.close();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 13, Source: PerformanceEvaluation.java

Example 13: testRow

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  FilterList list = new FilterList();
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) ; // drain the scanner, discarding results
  s.close();
}
 
Developer: tenggyut, Project: HIndex, Lines: 15, Source: PerformanceEvaluation.java

Example 14: scannerOpenWithPrefix

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
@Override
public int scannerOpenWithPrefix(ByteBuffer tableName,
                                 ByteBuffer startAndPrefix,
                                 List<ByteBuffer> columns,
    Map<ByteBuffer, ByteBuffer> attributes)
    throws IOError, TException {

  Table table = null;
  try {
    table = getTable(tableName);
    Scan scan = new Scan(getBytes(startAndPrefix));
    addAttributes(scan, attributes);
    Filter f = new WhileMatchFilter(
        new PrefixFilter(getBytes(startAndPrefix)));
    scan.setFilter(f);
    if (columns != null && columns.size() != 0) {
      for(ByteBuffer column : columns) {
        byte [][] famQf = CellUtil.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan), false);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw getIOError(e);
  } finally{
    closeTable(table);
  }
}
 
Developer: apache, Project: hbase, Lines: 34, Source: ThriftServerRunner.java

Example 15: getTaskScan

import org.apache.hadoop.hbase.filter.WhileMatchFilter; // import the required package/class
/**
 * Returns a Scan instance to retrieve all the task rows for a given job from
 * the job_history_task table.
 * @param jobKey the job key to match for all task rows
 * @return a {@code Scan} instance for the job_history_task table
 */
private Scan getTaskScan(JobKey jobKey) {
  byte[] startKey =
      Bytes.add(jobKeyConv.toBytes(jobKey), Constants.SEP_BYTES);
  Scan scan = new Scan();
  scan.setStartRow(startKey);
  // only return tasks for this job
  scan.setFilter(new WhileMatchFilter(new PrefixFilter(startKey)));
  // expect a lot of tasks on average
  scan.setCaching(500);
  return scan;
}
 
Developer: twitter, Project: hraven, Lines: 18, Source: JobHistoryService.java


Note: The org.apache.hadoop.hbase.filter.WhileMatchFilter class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by the community; copyright remains with the original authors, and any redistribution or use should follow each project's License. Please do not reproduce without permission.