

Java SingleColumnValueFilter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.SingleColumnValueFilter. If you are wondering what the SingleColumnValueFilter class is for, how to use it, or what working examples look like, the curated code examples below should help.


The SingleColumnValueFilter class belongs to the org.apache.hadoop.hbase.filter package. Below are 15 code examples of the class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
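
Before the collected examples, here is a minimal self-contained sketch of the pattern they all share: construct a SingleColumnValueFilter for one column, optionally call setFilterIfMissing(true) so that rows lacking the column are dropped rather than passed through, and attach the filter to a Scan. The table and column names ("t1", "cf", "q1", "v1") are placeholder assumptions, and the sketch uses the classic CompareFilter.CompareOp API that the examples below rely on (HBase 2.x replaced it with CompareOperator).

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleColumnValueFilterSketch {
    public static void scanMatchingRows(Connection conn) throws IOException {
        // Match rows where cf:q1 equals "v1" (placeholder names).
        SingleColumnValueFilter filter = new SingleColumnValueFilter(
                Bytes.toBytes("cf"), Bytes.toBytes("q1"),
                CompareFilter.CompareOp.EQUAL, Bytes.toBytes("v1"));
        // Without this, rows that do not contain cf:q1 at all are also returned.
        filter.setFilterIfMissing(true);

        Scan scan = new Scan();
        scan.setFilter(filter);
        try (Table table = conn.getTable(TableName.valueOf("t1"));
             ResultScanner scanner = table.getScanner(scan)) {
            for (Result r : scanner) {
                System.out.println("row: " + Bytes.toString(r.getRow()));
            }
        }
    }
}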

Example 1: QueryByCondition2

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
public static void QueryByCondition2(String tableName) {
    try {
        HTablePool pool = new HTablePool(configuration, 1000);
        HTable table = (HTable) pool.getTable(tableName);
        // Match rows where the value of column1 is "aaa".
        Filter filter = new SingleColumnValueFilter(
                Bytes.toBytes("column1"), null, CompareOp.EQUAL,
                Bytes.toBytes("aaa"));
        Scan s = new Scan();
        s.setFilter(filter);
        ResultScanner rs = table.getScanner(s);
        for (Result r : rs) {
            System.out.println("rowkey: " + new String(r.getRow()));
            for (KeyValue keyValue : r.raw()) {
                System.out.println("family: " + new String(keyValue.getFamily())
                        + " ==== value: " + new String(keyValue.getValue()));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Developer: yjp123456, Project: SparkDemo, Lines of code: 24, Source: MyClass.java
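
A note on this example: HTablePool is long deprecated and was removed in HBase 1.0. A rough modern equivalent of the table lookup above, written as a sketch against the HBase 1.x+ client API with a placeholder table name, would be:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class ModernTableLookup {
    public static void main(String[] args) throws IOException {
        Configuration configuration = HBaseConfiguration.create();
        // Connection replaces HTablePool: it is heavyweight and thread-safe,
        // and hands out lightweight Table instances on demand.
        try (Connection connection = ConnectionFactory.createConnection(configuration);
             Table table = connection.getTable(TableName.valueOf("mytable"))) {
            // ... run Gets/Scans against table as in the example above ...
        }
    }
}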

Example 2: testTwoFilterWithMustAllPassFailed

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
@Test
public void testTwoFilterWithMustAllPassFailed() throws IOException {
    clean();
    {
        Put put = new Put(Bytes.toBytes(rowPrefix));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_1"), Bytes.toBytes("col_1_var"));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_2"), Bytes.toBytes("col_2_var"));
        table.put(put);
    }

    {
        Get get = new Get(Bytes.toBytes(rowPrefix));
        Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_1"),
                CompareFilter.CompareOp.EQUAL, Bytes.toBytes("col_1_var"));
        Filter filter2 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_2"),
                CompareFilter.CompareOp.NOT_EQUAL, Bytes.toBytes("col_2_var"));
        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
        filterList.addFilter(filter1);
        filterList.addFilter(filter2);

        get.setFilter(filterList);
        Result result = table.get(get);
        assertTrue(result.getRow() == null);
    }
}
 
Developer: aliyun, Project: aliyun-tablestore-hbase-client, Lines of code: 26, Source: TestFilterList.java

Example 3: testTwoFilterWithMustOnePassFailed

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
@Test
public void testTwoFilterWithMustOnePassFailed() throws IOException {
    clean();
    {
        Put put = new Put(Bytes.toBytes(rowPrefix));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_1"), Bytes.toBytes("col_1_var"));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_2"), Bytes.toBytes("col_2_var"));
        table.put(put);
    }

    {
        Get get = new Get(Bytes.toBytes(rowPrefix));
        Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_1"),
                CompareFilter.CompareOp.NOT_EQUAL, Bytes.toBytes("col_1_var"));
        Filter filter2 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_2"),
                CompareFilter.CompareOp.NOT_EQUAL, Bytes.toBytes("col_2_var"));
        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
        filterList.addFilter(filter1);
        filterList.addFilter(filter2);

        get.setFilter(filterList);
        Result result = table.get(get);
        assertTrue(result.getRow() == null);
    }
}
 
Developer: aliyun, Project: aliyun-tablestore-hbase-client, Lines of code: 26, Source: TestFilterList.java

Example 4: doRawScan

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
private void doRawScan() throws IOException {
  FilterList filterList = new FilterList();
  CompareFilter.CompareOp startOp = CompareFilter.CompareOp.GREATER_OR_EQUAL;
  CompareFilter.CompareOp stopOp = CompareFilter.CompareOp.LESS_OR_EQUAL;
  for (int i = 0; i < indexColumnNames.length && i < scanValues.length; i++) {
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), startOp,
            Bytes.toBytes(scanValues[i][0])));
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), stopOp,
            Bytes.toBytes(scanValues[i][1])));
  }
  Scan scan = new Scan();
  scan.setFilter(filterList);
  scan.setId("raw-scan");
  Table table = conn.getTable(tableName);
  ResultScanner scanner = table.getScanner(scan);
  Result result;
  int count = 0;
  while ((result = scanner.next()) != null) {
    ++count;
    if (PRINT_RESULT) printResult(result);
  }
  scanner.close();
  System.out.println("raw scan has " + count + " records");
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 27, Source: LMDTester.java

Example 5: preprocess

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
public static ConditionTree preprocess(HRegion region, Filter filter, float maxScale) {
  if (filter == null) return null;
  ConditionTree tree = null;
  if (isIndexFilter(region, filter)) {
    System.out.println("preprocess A");
    tree = new ConditionTreeNoneLeafNode(region, (SingleColumnValueFilter) filter, maxScale);
  } else if (filter instanceof FilterList) {
    System.out.println("preprocess B");
    tree = new ConditionTreeNoneLeafNode(region, (FilterList) filter, maxScale);
  }
  // Guard: a filter that is neither an index filter nor a FilterList leaves
  // tree null, which would otherwise NPE on the isPrune() call below.
  if (tree == null || tree.isPrune()) {
    System.out.println("return null for prune");
    return null;
  } else {
    return tree;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 18, Source: ScanPreprocess.java

Example 6: constructScan

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
protected Scan constructScan(byte[] valuePrefix) throws IOException {
  FilterList list = new FilterList();
  Filter filter = new SingleColumnValueFilter(
      FAMILY_NAME, COLUMN_ZERO, CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(valuePrefix)
  );
  list.addFilter(filter);
  if(opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  Scan scan = new Scan();
  scan.setCaching(opts.caching);
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  scan.setFilter(list);
  return scan;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 21, Source: PerformanceEvaluation.java

Example 7: buildScanner

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
private ResultScanner buildScanner(String keyPrefix, String value, Table ht)
    throws IOException {
  // OurFilterList allFilters = new OurFilterList();
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes
      .toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes
      .toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);

  // allFilters.addFilter(new
  // RowExcludingSingleColumnValueFilter(Bytes.toBytes("trans-tags"),
  // Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value)));

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);

  return ht.getScanner(scan);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 26, Source: TestFromClientSide.java

Example 8: buildScanner

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
    throws IOException {
  // Defaults FilterList.Operator.MUST_PASS_ALL.
  FilterList allFilters = new FilterList();
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  // Only return rows where this column value exists in the row.
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return r.getScanner(scan);
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 20, Source: TestHRegion.java

Example 9: runScanner

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
private void runScanner(Table table, boolean slow) throws Exception {
  long time = System.nanoTime();
  Scan scan = new Scan();
  scan.addColumn(cf_essential, col_name);
  scan.addColumn(cf_joined, col_name);

  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      cf_essential, col_name, CompareFilter.CompareOp.EQUAL, flag_yes);
  filter.setFilterIfMissing(true);
  scan.setFilter(filter);
  scan.setLoadColumnFamiliesOnDemand(!slow);

  ResultScanner result_scanner = table.getScanner(scan);
  Result res;
  long rows_count = 0;
  while ((res = result_scanner.next()) != null) {
    rows_count++;
  }

  double timeSec = (System.nanoTime() - time) / 1000000000.0;
  result_scanner.close();
  LOG.info((slow ? "Slow" : "Joined") + " scanner finished in " + Double.toString(timeSec)
    + " seconds, got " + Long.toString(rows_count/2) + " rows");
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 25, Source: TestJoinedScanners.java

Example 10: fromFilter

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
public static Range[] fromFilter(SingleColumnValueFilter filter) {
  if (!(filter.getComparator() instanceof BinaryComparator)) {
    return new Range[0];
  }

  byte[] column = KeyValue.makeColumn(filter.getFamily(), filter.getQualifier());
  CompareOp compareOp = filter.getOperator();
  byte[] value = filter.getComparator().getValue();

  if (compareOp == CompareOp.NOT_EQUAL) {
    return new Range[] { new Range(column, null, CompareOp.NO_OP, value, CompareOp.LESS),
        new Range(column, value, CompareOp.GREATER, null, CompareOp.NO_OP) };
  } else {
    switch (compareOp) {
    case EQUAL:
    case GREATER_OR_EQUAL:
    case GREATER:
      return new Range[] { new Range(column, value, compareOp, null, CompareOp.NO_OP) };
    case LESS:
    case LESS_OR_EQUAL:
      return new Range[] { new Range(column, null, CompareOp.NO_OP, value, compareOp) };
    default:
      return new Range[0];
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 27, Source: Range.java

Example 11: getColumnValueFilters

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
private FilterList getColumnValueFilters(Row row) {
  FilterList filterList = new FilterList(Operator.MUST_PASS_ALL);
  Set<String> filterColumnNames = Sets.newHashSet(row.schema().fieldNames());
  
  for (Map.Entry<String, ColumnDef> column : columns.entrySet()) {
    if (!column.getValue().cf.equals("rowkey")) {
      if (filterColumnNames.contains(column.getKey())) {
        byte[] value = getColumnValueAsBytes(column.getValue().name, column.getValue().type, row);
        if (value != null) {
          SingleColumnValueFilter columnValueFilter = new SingleColumnValueFilter(
              Bytes.toBytes(column.getValue().cf),
              Bytes.toBytes(column.getValue().name),
              CompareFilter.CompareOp.EQUAL,
              value
          );
          filterList.addFilter(columnValueFilter);
        }
      }
    }
  }
  
  return filterList;
}
 
Developer: cloudera-labs, Project: envelope, Lines of code: 24, Source: DefaultHBaseSerde.java

Example 12: apply

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
/**
 * Applies the row model as a {@link org.apache.hadoop.hbase.filter.Filter} to the context.
 */
@Override
public void apply(CamelContext context, HBaseRow rowModel) {
    fl.getFilters().clear();
    if (rowModel != null) {
        for (HBaseCell cell : rowModel.getCells()) {
            if (cell.getValue() != null) {
                byte[] family = HBaseHelper.getHBaseFieldAsBytes(cell.getFamily());
                byte[] qualifier = HBaseHelper.getHBaseFieldAsBytes(cell.getQualifier());
                byte[] value = context.getTypeConverter().convertTo(byte[].class, cell.getValue());
                SingleColumnValueFilter columnValueFilter = new SingleColumnValueFilter(family, qualifier, CompareFilter.CompareOp.EQUAL, value);
                fl.addFilter(columnValueFilter);
            }
        }
    }
}
 
Developer: HydAu, Project: Camel, Lines of code: 19, Source: ModelAwareColumnMatchingFilter.java

Example 13: preprocess

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
public static ConditionTree preprocess(HRegion region, Filter filter, float maxScale) {
  if (filter == null) return null;
  ConditionTree tree = null;
  if (isIndexFilter(region, filter)) {
    tree = new ConditionTreeNoneLeafNode(region, (SingleColumnValueFilter) filter, maxScale);
  } else if (filter instanceof FilterList) {
    tree = new ConditionTreeNoneLeafNode(region, (FilterList) filter, maxScale);
  }

  // Guard: a filter that is neither an index filter nor a FilterList leaves
  // tree null, which would otherwise NPE on the isPrune() call below.
  if (tree == null || tree.isPrune()) {
    System.out.println("winter tree is prune");
    return null;
  } else {
    return tree;
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 17, Source: ScanPreprocess.java

Example 14: buildScanner

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
private ResultScanner buildScanner(String keyPrefix, String value, HTable ht)
    throws IOException {
  // OurFilterList allFilters = new OurFilterList();
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes
      .toBytes("trans-tags"), Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes
      .toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);

  // allFilters.addFilter(new
  // RowExcludingSingleColumnValueFilter(Bytes.toBytes("trans-tags"),
  // Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value)));

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);

  return ht.getScanner(scan);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 26, Source: TestFromClientSide.java

Example 15: getScanner

import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; // import the required package/class
@Override
public ResultScanner getScanner() throws IOException {
  Scan scan = new Scan();
  FilterList filters = new FilterList();
  for (Range range : ranges) {
    if (range.getStartValue() != null) {
      filters.addFilter(new SingleColumnValueFilter(range.getFamily(), range.getQualifier(),
          range.getStartType(), range.getStartValue()));
    }
    if (range.getStopValue() != null) {
      filters.addFilter(new SingleColumnValueFilter(range.getFamily(), range.getQualifier(),
          range.getStopType(), range.getStopValue()));
    }
    System.out.println("coffey hbase main index range: " + Bytes.toString(range.getColumn())
        + " ["
        + LCCIndexConstant.getStringOfValueAndType(range.getDataType(), range.getStartValue())
        + ","
        + LCCIndexConstant.getStringOfValueAndType(range.getDataType(), range.getStopValue())
        + "]");
  }
  scan.setCacheBlocks(false);
  scan.setFilter(filters);
  return table.getScanner(scan);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 26, Source: TPCHScanHBase.java


Note: The org.apache.hadoop.hbase.filter.SingleColumnValueFilter class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please follow the corresponding project's License. Do not reproduce without permission.