

Java PrefixFilter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.PrefixFilter. If you are wondering what exactly PrefixFilter does, how to use it, or just want working examples, the curated class examples below should help.


The PrefixFilter class belongs to the org.apache.hadoop.hbase.filter package. Fifteen code examples of the class are shown below, sorted by popularity.
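
Before diving into the project snippets, here is a minimal, self-contained sketch of the basic usage pattern: keep only the rows whose key begins with a given byte prefix. The table name "my_table" and the prefix "user_" are placeholders, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixFilterSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("my_table"))) {
      Scan scan = new Scan();
      // Keep only rows whose key starts with the literal bytes "user_".
      scan.setFilter(new PrefixFilter(Bytes.toBytes("user_")));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toStringBinary(r.getRow()));
        }
      }
    }
  }
}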

Example 1: testScanWithFilter

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
@Test
public void testScanWithFilter() throws IOException {
  prepareScanData(TRANSACTION_COLUMNS);
  writeData(COLUMN, lastTs(prewriteTs), ANOTHER_VALUE);
  ValueFilter valueFilter = new ValueFilter(CompareOp.EQUAL, new BinaryComparator(ANOTHER_VALUE));
  PrefixFilter prefixFilter = new PrefixFilter(ANOTHER_ROW);
  FilterList filterList = new FilterList();
  filterList.addFilter(valueFilter);
  filterList.addFilter(prefixFilter);
  ThemisScanner scanner = prepareScanner(TRANSACTION_COLUMNS, filterList);
  checkAndCloseScanner(scanner);
  
  filterList = new FilterList(Operator.MUST_PASS_ONE);
  filterList.addFilter(valueFilter);
  filterList.addFilter(prefixFilter);
  scanner = prepareScanner(TRANSACTION_COLUMNS, filterList);
  checkScanRow(new ColumnCoordinate[]{COLUMN_WITH_ANOTHER_ROW}, scanner.next());
  Assert.assertEquals(1, scanner.next().size());
  checkAndCloseScanner(scanner);
}
 
Developer: XiaoMi, Project: themis, Lines: 21, Source: TestThemisScanner.java
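
Example 1 exercises the two FilterList combination modes: MUST_PASS_ALL (the default) requires every member filter to accept a cell, while MUST_PASS_ONE requires only one to. A minimal sketch of the distinction, using a placeholder row key and value and assuming the same imports as the test above:

// Intersection: rows must start with "row1" AND carry the value "v".
FilterList and = new FilterList(FilterList.Operator.MUST_PASS_ALL);
and.addFilter(new PrefixFilter(Bytes.toBytes("row1")));
and.addFilter(new ValueFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("v"))));

// Union: rows matching either filter pass.
FilterList or = new FilterList(FilterList.Operator.MUST_PASS_ONE);
or.addFilter(new PrefixFilter(Bytes.toBytes("row1")));
or.addFilter(new ValueFilter(CompareOp.EQUAL, new BinaryComparator(Bytes.toBytes("v"))));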

Example 2: buildScanner

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
private ResultScanner buildScanner(String keyPrefix, String value, Table ht)
    throws IOException {
  // OurFilterList allFilters = new OurFilterList();
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes
      .toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes
      .toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);

  // allFilters.addFilter(new
  // RowExcludingSingleColumnValueFilter(Bytes.toBytes("trans-tags"),
  // Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value)));

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);

  return ht.getScanner(scan);
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TestFromClientSide.java

Example 3: buildScanner

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
    throws IOException {
  // Defaults FilterList.Operator.MUST_PASS_ALL.
  FilterList allFilters = new FilterList();
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  // Only return rows where this column value exists in the row.
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return r.getScanner(scan);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestHRegion.java

Example 4: getVertexIndexScanWithLimit

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
private Scan getVertexIndexScanWithLimit(String label, boolean isUnique, String key, Object from, int limit, boolean reversed) {
    byte[] prefix = serializeForRead(label, isUnique, key, null);
    byte[] startRow = from != null
            ? serializeForRead(label, isUnique, key, from)
            : prefix;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    if (graph.configuration().getInstanceType() == HBaseGraphConfiguration.InstanceType.BIGTABLE) {
        if (reversed) {
            throw new UnsupportedOperationException("Reverse scans not supported by Bigtable");
        } else {
            // PrefixFilter in Bigtable does not automatically stop
            // See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/issues/1087
            stopRow = HBaseGraphUtils.incrementBytes(prefix);
        }
    }
    if (reversed) startRow = HBaseGraphUtils.incrementBytes(startRow);
    Scan scan = new Scan(startRow, stopRow);
    FilterList filterList = new FilterList();
    filterList.addFilter(new PrefixFilter(prefix));
    filterList.addFilter(new PageFilter(limit));
    scan.setFilter(filterList);
    scan.setReversed(reversed);
    return scan;
}
 
Developer: rayokota, Project: hgraphdb, Lines: 25, Source: VertexIndexModel.java
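
HBaseGraphUtils.incrementBytes above is hgraphdb's own helper for computing an exclusive stop row — the smallest key strictly greater than every key sharing the prefix. A common way to implement that idea looks roughly like this (a sketch, not the project's actual code):

// Treat the prefix as an unsigned big-endian number and add one,
// dropping trailing 0xFF bytes. An all-0xFF prefix has no upper
// bound, so fall back to the empty stop row (scan to end of table).
static byte[] incrementBytes(byte[] prefix) {
  for (int i = prefix.length - 1; i >= 0; i--) {
    if (prefix[i] != (byte) 0xFF) {
      byte[] stop = java.util.Arrays.copyOf(prefix, i + 1);
      stop[i]++;
      return stop;
    }
  }
  return HConstants.EMPTY_END_ROW;
}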

Example 5: buildScanner

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
private ResultScanner buildScanner(String keyPrefix, String value, HTable ht)
    throws IOException {
  // OurFilterList allFilters = new OurFilterList();
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes
      .toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes
      .toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);

  // allFilters.addFilter(new
  // RowExcludingSingleColumnValueFilter(Bytes.toBytes("trans-tags"),
  // Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value)));

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);

  return ht.getScanner(scan);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 26, Source: TestFromClientSide.java

Example 6: buildScanner

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
throws IOException {
  // Defaults FilterList.Operator.MUST_PASS_ALL.
  FilterList allFilters = new FilterList();
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  // Only return rows where this column value exists in the row.
  SingleColumnValueFilter filter =
    new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return r.getScanner(scan);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 21, Source: TestHRegion.java

Example 7: testPrefixAddedAsRowRegex

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
@Test
public void testPrefixAddedAsRowRegex() throws IOException {
  PrefixFilterAdapter adapter = new PrefixFilterAdapter();
  String prefix = "Foobar";
  PrefixFilter filter = new PrefixFilter(Bytes.toBytes(prefix));
  Scan emptyScan = new Scan();
  FilterAdapterContext context = new FilterAdapterContext(emptyScan);

  byte[] prefixRegex = Bytes.toBytes(prefix + "\\C*");
  Assert.assertEquals(
      RowFilter.newBuilder()
          .setRowKeyRegexFilter(
              ByteString.copyFrom(prefixRegex))
          .build(),
      adapter.adapt(context, filter));
}
 
Developer: dmmcerlean, Project: cloud-bigtable-client, Lines: 17, Source: TestPrefixFilterAdapter.java

Example 8: testPrefixFilter

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
@Test
public void testPrefixFilter() throws IOException {
  String prefix = "testPrefixFilter";
  int rowCount = 10;
  byte[][] rowKeys = dataHelper.randomData(prefix, rowCount);
  List<Put> puts = new ArrayList<>();
  for (byte[] rowKey : rowKeys) {
    puts.add(
        new Put(rowKey)
            .addColumn(COLUMN_FAMILY, Bytes.toBytes("q1"), Bytes.toBytes("val1")));
  }
  Table table = getConnection().getTable(TABLE_NAME);
  table.put(puts);

  PrefixFilter filter = new PrefixFilter(Bytes.toBytes(prefix));
  Scan scan = new Scan().addFamily(COLUMN_FAMILY).setFilter(filter);
  ResultScanner scanner = table.getScanner(scan);
  Result[] results = scanner.next(rowCount + 2);
  Assert.assertEquals(rowCount, results.length);
  Arrays.sort(rowKeys, Bytes.BYTES_COMPARATOR);
  // Both results[] and rowKeys[] should be in the same order now. Iterate over both
  // and verify rowkeys.
  for (int i = 0; i < rowCount; i++) {
    Assert.assertArrayEquals(rowKeys[i], results[i].getRow());
  }
}
 
Developer: dmmcerlean, Project: cloud-bigtable-client, Lines: 27, Source: TestFilters.java

Example 9: testConstructDataRowGet

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
@Test
public void testConstructDataRowGet() throws IOException {
  ThemisGet get = new ThemisGet(ROW);
  get.addFamily(FAMILY);
  get.addColumn(ANOTHER_FAMILY, QUALIFIER);
  get.setFilter(new PrefixFilter(ROW));
  get.setCacheBlocks(true);
  
  ThemisGet actual = IndexScanner.constructDataRowGet(ANOTHER_ROW, get);
  Assert.assertArrayEquals(ANOTHER_ROW, actual.getRow());
  Assert
      .assertArrayEquals(QUALIFIER, actual.getFamilyMap().get(ANOTHER_FAMILY).iterator().next());
  Assert.assertTrue(actual.getFamilyMap().containsKey(FAMILY));
  Assert.assertNull(actual.getFamilyMap().get(FAMILY));
  Assert.assertTrue(actual.getCacheBlocks());
}
 
Developer: XiaoMi, Project: themis, Lines: 17, Source: TestIndexScanner.java

Example 10: testGetWithRowkeyFilter

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
@Test
public void testGetWithRowkeyFilter() throws IOException {
  commitColumnsWithDifferentTs();
  Get get = createGetForDifferentTs();
  get.setFilter(new PrefixFilter(ROW));
  Result iResult = cpClient.themisGet(TABLENAME, get, prewriteTs);
  checkGetResultForDifferentTs(iResult);
  get.setFilter(new PrefixFilter(ANOTHER_ROW));
  iResult = cpClient.themisGet(TABLENAME, get, prewriteTs);
  Assert.assertTrue(iResult.isEmpty());
  FilterList filterList = new FilterList();
  filterList.addFilter(new PrefixFilter(ROW));
  filterList.addFilter(new PrefixFilter(ANOTHER_ROW));
  get.setFilter(filterList);
  iResult = cpClient.themisGet(TABLENAME, get, prewriteTs);
  Assert.assertTrue(iResult.isEmpty());
  filterList = new FilterList(Operator.MUST_PASS_ONE);
  filterList.addFilter(new PrefixFilter(ROW));
  filterList.addFilter(new PrefixFilter(ANOTHER_ROW));
  get.setFilter(filterList);
  iResult = cpClient.themisGet(TABLENAME, get, prewriteTs);
  checkGetResultForDifferentTs(iResult);
}
 
Developer: XiaoMi, Project: themis, Lines: 24, Source: TestThemisCoprocessorRead.java

Example 11: buildScanner

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
private ResultScanner buildScanner(String keyPrefix, String value, Table ht)
    throws IOException {
  // OurFilterList allFilters = new OurFilterList();
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes
      .toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOperator.EQUAL, Bytes
      .toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);

  // allFilters.addFilter(new
  // RowExcludingSingleColumnValueFilter(Bytes.toBytes("trans-tags"),
  // Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value)));

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);

  return ht.getScanner(scan);
}
 
Developer: apache, Project: hbase, Lines: 26, Source: TestFromClientSide.java

Example 12: scannerOpenWithPrefix

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
@Override
public int scannerOpenWithPrefix(ByteBuffer tableName,
                                 ByteBuffer startAndPrefix,
                                 List<ByteBuffer> columns)
    throws IOError, TException {
  try {
    HTable table = getTable(tableName);
    Scan scan = new Scan(getBytes(startAndPrefix));
    Filter f = new WhileMatchFilter(
        new PrefixFilter(getBytes(startAndPrefix)));
    scan.setFilter(f);
    if(columns != null && columns.size() != 0) {
      for(ByteBuffer column : columns) {
        byte [][] famQf = KeyValue.parseColumn(getBytes(column));
        if(famQf.length == 1) {
          scan.addFamily(famQf[0]);
        } else {
          scan.addColumn(famQf[0], famQf[1]);
        }
      }
    }
    return addScanner(table.getScanner(scan));
  } catch (IOException e) {
    throw new IOError(e.getMessage());
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 27, Source: ThriftServer.java
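
Two details make Example 12 efficient. Setting the scan's start row to the prefix skips every key that sorts before it, and wrapping the PrefixFilter in a WhileMatchFilter ends the entire scan as soon as the inner filter rejects a row, so nothing past the prefix range is evaluated. A condensed sketch of the pattern (the Table handle and imports are assumed):

byte[] prefix = Bytes.toBytes("user_");
Scan scan = new Scan(prefix);  // start scanning at the first possible match
// Terminate the whole scan once a row no longer carries the prefix.
scan.setFilter(new WhileMatchFilter(new PrefixFilter(prefix)));
ResultScanner scanner = table.getScanner(scan);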

Example 13: testAddColumnFilterToScanPrefixFilter

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
@Test
public void testAddColumnFilterToScanPrefixFilter() throws Exception {
  ColumnFilter cf = new ColumnFilter( "Family" );
  cf.setConstant( "123" );
  cf.setSignedComparison( true );

  VariableSpace space = mockVariableSpace();
  connectionSpy.m_sourceScan = new Scan();
  HBaseValueMeta meta = new HBaseValueMeta( "colFamly,colname,Family", 1, 20, 1 );
  meta.setKey( true );
  meta.setIsLongOrDouble( true );
  doReturn( null ).when( connectionSpy ).getCompareOpByComparisonType( any( ColumnFilter.ComparisonType.class ) );

  connectionSpy.addColumnFilterToScan( cf, meta, space, true );
  FilterList filter = (FilterList) connectionSpy.m_sourceScan.getFilter();
  assertFalse( filter.getFilters().isEmpty() );
  Assert.assertEquals( filter.getFilters().size(), 1 );
  Assert.assertEquals( PrefixFilter.class, filter.getFilters().get( 0 ).getClass() );
}
 
Developer: pentaho, Project: pentaho-hadoop-shims, Lines: 20, Source: CommonHBaseConnectionTest.java

Example 14: getFlow

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
/**
 * Returns the {@link Flow} instance matching the application ID and run ID.
 *
 * @param cluster the cluster identifier
 * @param user the user running the jobs
 * @param appId the application description
 * @param runId the specific run ID for the flow
 * @param populateTasks whether or not to populate the task details for each
 *          job
 * @return the matching Flow instance, or null if no flow is found
 */
public Flow getFlow(String cluster, String user, String appId, long runId,
    boolean populateTasks) throws IOException {
  Flow flow = null;

  byte[] startRow = ByteUtil.join(Constants.SEP_BYTES, Bytes.toBytes(cluster),
      Bytes.toBytes(user), Bytes.toBytes(appId),
      Bytes.toBytes(FlowKey.encodeRunId(runId)), Constants.EMPTY_BYTES);

  LOG.info(
      "Reading job_history rows start at " + Bytes.toStringBinary(startRow));
  Scan scan = new Scan();
  // start scanning history at cluster!user!app!run!
  scan.setStartRow(startRow);
  // require that all results match this flow prefix
  scan.setFilter(new WhileMatchFilter(new PrefixFilter(startRow)));

  List<Flow> flows = createFromResults(scan, populateTasks, 1);
  if (flows.size() > 0) {
    flow = flows.get(0);
  }

  return flow;
}
 
Developer: twitter, Project: hraven, Lines: 35, Source: JobHistoryService.java

Example 15: getFlowByJobID

import org.apache.hadoop.hbase.filter.PrefixFilter; // import the required package/class
/**
 * Returns the {@link Flow} instance containing the given job ID.
 *
 * @param cluster the cluster identifier
 * @param jobId the job identifier
 * @param populateTasks whether or not to populate the task details for each
 *          job
 * @return the matching Flow instance, or null if the job ID is not found
 */
public Flow getFlowByJobID(String cluster, String jobId,
    boolean populateTasks) throws IOException {
  Flow flow = null;
  JobKey key = idService.getJobKeyById(new QualifiedJobId(cluster, jobId));
  if (key != null) {
    byte[] startRow =
        ByteUtil.join(Constants.SEP_BYTES, Bytes.toBytes(key.getCluster()),
            Bytes.toBytes(key.getUserName()), Bytes.toBytes(key.getAppId()),
            Bytes.toBytes(key.getEncodedRunId()), Constants.EMPTY_BYTES);

    LOG.info("Reading job_history rows start at "
        + Bytes.toStringBinary(startRow));
    Scan scan = new Scan();
    // start scanning history at cluster!user!app!run!
    scan.setStartRow(startRow);
    // require that all results match this flow prefix
    scan.setFilter(new WhileMatchFilter(new PrefixFilter(startRow)));

    List<Flow> flows = createFromResults(scan, populateTasks, 1);
    if (flows.size() > 0) {
      flow = flows.get(0);
    }
  }
  return flow;
}
 
Developer: twitter, Project: hraven, Lines: 33, Source: JobHistoryService.java
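
ByteUtil.join and Constants.SEP_BYTES in Examples 14 and 15 are hraven's own helpers for building composite row keys of the form cluster!user!app!run!. The prefix-scan pattern itself needs nothing project-specific; a generic sketch with HBase's Bytes utility, using placeholder key components:

byte[] sep = Bytes.toBytes("!");
byte[] startRow = Bytes.add(
    Bytes.add(Bytes.toBytes("cluster1"), sep),
    Bytes.add(Bytes.toBytes("alice"), sep));
Scan scan = new Scan();
scan.setStartRow(startRow);  // jump straight to the first key with this prefix
// Require every returned row to keep matching the prefix, then stop.
scan.setFilter(new WhileMatchFilter(new PrefixFilter(startRow)));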


Note: The org.apache.hadoop.hbase.filter.PrefixFilter examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from projects contributed by the open-source community, and copyright remains with the original authors. When redistributing or reusing the code, please follow the corresponding project's License; do not republish without permission.