

Java FilterList.addFilter Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.filter.FilterList.addFilter. If you are wondering what FilterList.addFilter does, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.filter.FilterList.


The sections below present 15 code examples of the FilterList.addFilter method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
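
Before diving into the examples, the core pattern is worth seeing in isolation. The following minimal sketch is illustrative only; the row prefix, family, qualifier, and value names in it are assumptions chosen for demonstration, not taken from any of the projects cited below.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

private Scan buildExampleScan() {
  // MUST_PASS_ALL combines filters with logical AND; MUST_PASS_ONE with logical OR.
  FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);

  // Keep only rows whose key starts with "order-" ...
  filters.addFilter(new PrefixFilter(Bytes.toBytes("order-")));
  // ... and whose cf:status column equals "OPEN".
  filters.addFilter(new SingleColumnValueFilter(
      Bytes.toBytes("cf"), Bytes.toBytes("status"),
      CompareFilter.CompareOp.EQUAL, Bytes.toBytes("OPEN")));

  Scan scan = new Scan();
  scan.setFilter(filters); // FilterList itself implements Filter
  return scan;
}

Because FilterList itself implements Filter, one list can be passed to another list's addFilter, so AND/OR conditions can be nested to arbitrary depth.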

Example 1: getColumnValueFilters

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
private FilterList getColumnValueFilters(Row row) {
  FilterList filterList = new FilterList(Operator.MUST_PASS_ALL);
  Set<String> filterColumnNames = Sets.newHashSet(row.schema().fieldNames());
  
  for (Map.Entry<String, ColumnDef> column : columns.entrySet()) {
    if (!column.getValue().cf.equals("rowkey")) {
      if (filterColumnNames.contains(column.getKey())) {
        byte[] value = getColumnValueAsBytes(column.getValue().name, column.getValue().type, row);
        if (value != null) {
          SingleColumnValueFilter columnValueFilter = new SingleColumnValueFilter(
              Bytes.toBytes(column.getValue().cf),
              Bytes.toBytes(column.getValue().name),
              CompareFilter.CompareOp.EQUAL,
              value
          );
          filterList.addFilter(columnValueFilter);
        }
      }
    }
  }
  
  return filterList;
}
 
Developer: cloudera-labs | Project: envelope | Lines: 24 | Source: DefaultHBaseSerde.java

Example 2: createFilter

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
static Optional<Filter> createFilter(Object[] args) {
  if (args.length == 0) {
    return Optional.empty();
  }
  FilterList filters = new FilterList();
  for (int i = 0; i < args.length; i++) {
    Object filter = args[i];
    try {
      checkArgument(filter instanceof Filter,
          "Filter " + i + " must be of type " + Filter.class.getName()
              + " but is of type " + filter.getClass().getName());
    } catch (IllegalArgumentException e) {
      throw new CacheLoaderException(e);
    }
    filters.addFilter((Filter) filter);
  }
  return Optional.of(filters);
}
 
Developer: bakdata | Project: ignite-hbase | Lines: 19 | Source: FilterParser.java

Example 3: testTwoFilterWithMustAllPassFailed

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
@Test
public void testTwoFilterWithMustAllPassFailed() throws IOException {
    clean();
    {
        Put put = new Put(Bytes.toBytes(rowPrefix));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_1"), Bytes.toBytes("col_1_var"));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_2"), Bytes.toBytes("col_2_var"));
        table.put(put);
    }

    {
        Get get = new Get(Bytes.toBytes(rowPrefix));
        Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_1"),
                CompareFilter.CompareOp.EQUAL, Bytes.toBytes("col_1_var"));
        Filter filter2 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_2"),
                CompareFilter.CompareOp.NOT_EQUAL, Bytes.toBytes("col_2_var"));
        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
        filterList.addFilter(filter1);
        filterList.addFilter(filter2);

        get.setFilter(filterList);
        Result result = table.get(get);
        assertTrue(result.getRow() == null);
    }
}
 
Developer: aliyun | Project: aliyun-tablestore-hbase-client | Lines: 26 | Source: TestFilterList.java

Example 4: testTwoFilterWithMustOnePassFailed

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
@Test
public void testTwoFilterWithMustOnePassFailed() throws IOException {
    clean();
    {
        Put put = new Put(Bytes.toBytes(rowPrefix));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_1"), Bytes.toBytes("col_1_var"));
        put.addColumn(Bytes.toBytes(familyName), Bytes.toBytes("col_2"), Bytes.toBytes("col_2_var"));
        table.put(put);
    }

    {
        Get get = new Get(Bytes.toBytes(rowPrefix));
        Filter filter1 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_1"),
                CompareFilter.CompareOp.NOT_EQUAL, Bytes.toBytes("col_1_var"));
        Filter filter2 = new SingleColumnValueFilter(Bytes.toBytes(familyName), Bytes.toBytes("col_2"),
                CompareFilter.CompareOp.NOT_EQUAL, Bytes.toBytes("col_2_var"));
        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ONE);
        filterList.addFilter(filter1);
        filterList.addFilter(filter2);

        get.setFilter(filterList);
        Result result = table.get(get);
        assertTrue(result.getRow() == null);
    }
}
 
Developer: aliyun | Project: aliyun-tablestore-hbase-client | Lines: 26 | Source: TestFilterList.java

Example 5: doRawScan

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
private void doRawScan() throws IOException {
  FilterList filterList = new FilterList();
  CompareFilter.CompareOp startOp = CompareFilter.CompareOp.GREATER_OR_EQUAL;
  CompareFilter.CompareOp stopOp = CompareFilter.CompareOp.LESS_OR_EQUAL;
  for (int i = 0; i < indexColumnNames.length && i < scanValues.length; i++) {
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), startOp,
            Bytes.toBytes(scanValues[i][0])));
    filterList.addFilter(
        new SingleColumnValueFilter(familyName, Bytes.toBytes(indexColumnNames[i]), stopOp,
            Bytes.toBytes(scanValues[i][1])));
  }
  Scan scan = new Scan();
  scan.setFilter(filterList);
  scan.setId("raw-scan");
  Table table = conn.getTable(tableName);
  ResultScanner scanner = table.getScanner(scan);
  Result result;
  int count = 0;
  while ((result = scanner.next()) != null) {
    ++count;
    if (PRINT_RESULT) printResult(result);
  }
  scanner.close();
  System.out.println("raw scan has " + count + " records");
}
 
Developer: fengchen8086 | Project: ditb | Lines: 27 | Source: LMDTester.java

Example 6: testRow

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.setCaching(opts.caching);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  ResultScanner s = this.table.getScanner(scan);
  for (Result rr; (rr = s.next()) != null;) {
    updateValueSize(rr);
  }
  s.close();
}
 
Developer: fengchen8086 | Project: ditb | Lines: 22 | Source: PerformanceEvaluation.java

Example 7: constructScan

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
protected Scan constructScan(byte[] valuePrefix) throws IOException {
  FilterList list = new FilterList();
  Filter filter = new SingleColumnValueFilter(
      FAMILY_NAME, COLUMN_ZERO, CompareFilter.CompareOp.EQUAL,
      new BinaryComparator(valuePrefix)
  );
  list.addFilter(filter);
  if(opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  Scan scan = new Scan();
  scan.setCaching(opts.caching);
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  scan.setFilter(list);
  return scan;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 21 | Source: PerformanceEvaluation.java

Example 8: buildScanner

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
private ResultScanner buildScanner(String keyPrefix, String value, Table ht)
    throws IOException {
  // OurFilterList allFilters = new OurFilterList();
  FilterList allFilters = new FilterList(/* FilterList.Operator.MUST_PASS_ALL */);
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  SingleColumnValueFilter filter = new SingleColumnValueFilter(
      Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"),
      CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);

  // allFilters.addFilter(new
  // RowExcludingSingleColumnValueFilter(Bytes.toBytes("trans-tags"),
  // Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value)));

  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);

  return ht.getScanner(scan);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 26 | Source: TestFromClientSide.java

Example 9: buildScanner

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
    throws IOException {
  // Defaults FilterList.Operator.MUST_PASS_ALL.
  FilterList allFilters = new FilterList();
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  // Only return rows where this column value exists in the row.
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return r.getScanner(scan);
}
 
Developer: fengchen8086 | Project: ditb | Lines: 20 | Source: TestHRegion.java

Example 10: getVertexIndexScanWithLimit

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
private Scan getVertexIndexScanWithLimit(String label, boolean isUnique, String key, Object from, int limit, boolean reversed) {
    byte[] prefix = serializeForRead(label, isUnique, key, null);
    byte[] startRow = from != null
            ? serializeForRead(label, isUnique, key, from)
            : prefix;
    byte[] stopRow = HConstants.EMPTY_END_ROW;
    if (graph.configuration().getInstanceType() == HBaseGraphConfiguration.InstanceType.BIGTABLE) {
        if (reversed) {
            throw new UnsupportedOperationException("Reverse scans not supported by Bigtable");
        } else {
            // PrefixFilter in Bigtable does not automatically stop
            // See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/issues/1087
            stopRow = HBaseGraphUtils.incrementBytes(prefix);
        }
    }
    if (reversed) startRow = HBaseGraphUtils.incrementBytes(startRow);
    Scan scan = new Scan(startRow, stopRow);
    FilterList filterList = new FilterList();
    filterList.addFilter(new PrefixFilter(prefix));
    filterList.addFilter(new PageFilter(limit));
    scan.setFilter(filterList);
    scan.setReversed(reversed);
    return scan;
}
 
Developer: rayokota | Project: hgraphdb | Lines: 25 | Source: VertexIndexModel.java

Example 11: isReallyEmptyRegion

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
public static boolean isReallyEmptyRegion(HConnection connection,
    String tableName, HRegionInfo regionInfo) throws IOException {
    boolean emptyRegion = false;
    // verify really empty region by scanning records
    try (HTableInterface table = connection.getTable(tableName)) {
        Scan scan = new Scan(regionInfo.getStartKey(), regionInfo.getEndKey());
        FilterList filterList = new FilterList();
        filterList.addFilter(new KeyOnlyFilter());
        filterList.addFilter(new FirstKeyOnlyFilter());
        scan.setFilter(filterList);
        scan.setCacheBlocks(false);
        scan.setSmall(true);
        scan.setCaching(1);

        try (ResultScanner scanner = table.getScanner(scan)) {
            if (scanner.next() == null) emptyRegion = true;
        }
    }
    return emptyRegion;
}
 
Developer: kakao | Project: hbase-tools | Lines: 21 | Source: CommandAdapter.java

Example 12: testShouldRetrieveNegativeIntValue

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
@Test(timeout = 180000)
public void testShouldRetrieveNegativeIntValue() throws Exception {
  Configuration conf = UTIL.getConfiguration();
  String userTableName = "testShouldRetrieveNegativeIntValue";
  HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
  HColumnDescriptor hcd = new HColumnDescriptor("cf1");
  ihtd.addFamily(hcd);
  admin.createTable(ihtd);
  HTable table = new HTable(conf, userTableName);
  rangePutForIdx2WithInteger(table);
  FilterList masterFilter = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter scvf =
      new SingleColumnValueFilter("cf1".getBytes(), "c1".getBytes(), CompareOp.GREATER,
          new IntComparator(Bytes.toBytes(-6)));
  masterFilter.addFilter(scvf);
  Scan scan = new Scan();
  scan.setFilter(masterFilter);
  ResultScanner scanner = table.getScanner(scan);
  List<Result> testRes = new ArrayList<Result>();
  Result[] result = scanner.next(1);
  while (result != null && result.length > 0) {
    testRes.add(result[0]);
    result = scanner.next(1);
  }
  assertTrue(testRes.size() == 5);
}
 
Developer: tenggyut | Project: HIndex | Lines: 27 | Source: TestMultipleIndicesInScan.java

Example 13: getFilter

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
/**
 * Builds the filter list for the given condition.
 * @param condition the XPath condition to be applied server-side
 * @param passAll whether every filter must pass (MUST_PASS_ALL) rather than just one (MUST_PASS_ONE)
 * @param famNames column families to be checked
 * @param qualNames column qualifiers to be checked
 * @param params comparison values for each family/qualifier pair
 * @return a FilterList containing all filters needed
 */
public FilterList getFilter(XPath condition, boolean passAll,
        String[] famNames, String[] qualNames, String[] params) {
    FilterList list = new FilterList(passAll
            ? FilterList.Operator.MUST_PASS_ALL
            : FilterList.Operator.MUST_PASS_ONE);
    for (int iCont = 0; iCont < famNames.length; iCont++) {
        SingleColumnValueFilter filterTmp = new SingleColumnValueFilter(
            Bytes.toBytes(famNames[iCont]),
            Bytes.toBytes(qualNames[iCont]),
            CompareOp.EQUAL,
            Bytes.toBytes(params[iCont])
            );
        list.addFilter(filterTmp);
    }
    return list;
}
 
Developer: dkmfbk | Project: knowledgestore | Lines: 25 | Source: AbstractHBaseUtils.java

Example 14: applyFuzzyFilter

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
public static void applyFuzzyFilter(Scan scan, List<org.apache.kylin.common.util.Pair<byte[], byte[]>> fuzzyKeys) {
    if (fuzzyKeys != null && fuzzyKeys.size() > 0) {
        FuzzyRowFilter rowFilter = new FuzzyRowFilter(convertToHBasePair(fuzzyKeys));

        Filter filter = scan.getFilter();
        if (filter != null) {
            // an InclusiveStopFilter may already be set; see buildScan
            FilterList filterList = new FilterList();
            filterList.addFilter(filter);
            filterList.addFilter(rowFilter);
            scan.setFilter(filterList);
        } else {
            scan.setFilter(rowFilter);
        }
    }
}
 
Developer: apache | Project: kylin | Lines: 17 | Source: CubeHBaseRPC.java

Example 15: initScans

import org.apache.hadoop.hbase.filter.FilterList; // import the package/class this method depends on
/**
 * Initializes the scan list.
 *
 * @param job the MapReduce job whose configuration carries the run date
 * @return a list containing the configured scan
 */
private List<Scan> initScans(Job job) {
	Configuration conf = job.getConfiguration();
	// Get the run date: yyyy-MM-dd
	String date = conf.get(GlobalConstants.RUNNING_DATE_PARAMES);
	long startDate = TimeUtil.parseString2Long(date);
	long endDate = startDate + GlobalConstants.DAY_OF_MILLISECONDS;

	Scan scan = new Scan();
	// Define the start and stop rowkeys for the HBase scan
	scan.setStartRow(Bytes.toBytes("" + startDate));
	scan.setStopRow(Bytes.toBytes("" + endDate));

	FilterList filterList = new FilterList();
	// Define the column names the mapper needs to read
	String[] columns = new String[] { EventLogConstants.LOG_COLUMN_NAME_UUID, // user id
			EventLogConstants.LOG_COLUMN_NAME_SERVER_TIME, // server time
			EventLogConstants.LOG_COLUMN_NAME_PLATFORM, // platform name
			EventLogConstants.LOG_COLUMN_NAME_BROWSER_NAME, // browser name
			EventLogConstants.LOG_COLUMN_NAME_BROWSER_VERSION // browser version
	};
	filterList.addFilter(this.getColumnFilter(columns));

	scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(EventLogConstants.HBASE_NAME_EVENT_LOGS));
	scan.setFilter(filterList);
	return Lists.newArrayList(scan);
}
 
Developer: liuhaozzu | Project: big_data | Lines: 33 | Source: ActiveUserRunner.java


Note: The org.apache.hadoop.hbase.filter.FilterList.addFilter method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce this article without permission.