当前位置: 首页>>代码示例>>Java>>正文


Java FilterBase类代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.filter.FilterBase的典型用法代码示例。如果您正苦于以下问题:Java FilterBase类的具体用法?Java FilterBase怎么用?Java FilterBase使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


FilterBase类属于org.apache.hadoop.hbase.filter包,在下文中一共展示了FilterBase类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: search

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
/**
 * Batch-fetches the given row keys and maps each returned row to an entity.
 *
 * @param rowkeys row keys to fetch
 * @param filter  optional server-side filter applied to every Get; may be null
 * @param clazz   entity class used to resolve the table metadata and row parser
 * @return parsed entities; rows whose parse result is null are skipped
 * @throws Exception if table metadata lookup or the batch get fails
 */
public <T extends HBase>  List<T> search(List<byte[]> rowkeys, FilterBase filter, Class<? extends HBase> clazz) throws Exception{
    TableMeta tableMeta = getTableMeta(clazz);

    // Build one Get per row key, attaching the optional filter to each.
    List<Get> requests = Lists.newArrayList();
    for (byte[] key : rowkeys) {
        Get request = new Get(key);
        if (filter != null) {
            request.setFilter(filter);
        }
        requests.add(request);
    }

    // Execute the batch get; collect every row that parses to a non-null entity.
    // NOTE(review): the cast to T is unchecked -- clazz is not tied to T in the
    // signature, so callers must pass a class matching the requested T.
    List<T> entities = Lists.newArrayList();
    try (Table table = connection.getTable(tableMeta.getHtableName())) {
        for (Result result : table.get(requests)) {
            HBase entity = tableMeta.parse(result);
            if (null != entity) {
                entities.add((T) entity);
            }
        }
    }

    return entities;
}
 
开发者ID:acupple,项目名称:hbase-fx,代码行数:26,代码来源:HFxClient.java

示例2: testFlushBeforeCompletingScanWoFilter

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
@Test
public void testFlushBeforeCompletingScanWoFilter() throws IOException, InterruptedException {
  // Flush the store just before the scanner reads the last expected cell and
  // verify the scan still completes with all cells, using a filter that
  // includes everything. The flag is set by the hook but the unconditional
  // INCLUDE filter below never consults it.
  final AtomicBoolean timeToGoNextRow = new AtomicBoolean(false);
  final int expectedSize = 3;
  MyListHook flushBeforeLastCell = new MyListHook() {
    @Override
    public void hook(int currentSize) {
      if (currentSize != expectedSize - 1) {
        return; // act exactly once, right before the final cell
      }
      try {
        flushStore(store, id++);
        timeToGoNextRow.set(true);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  };
  FilterBase includeEverything = new FilterBase() {
    @Override
    public Filter.ReturnCode filterCell(final Cell c) throws IOException {
      return ReturnCode.INCLUDE;
    }
  };
  testFlushBeforeCompletingScan(flushBeforeLastCell, includeEverything, expectedSize);
}
 
开发者ID:apache,项目名称:hbase,代码行数:24,代码来源:TestHStore.java

示例3: testAddFilterAndArguments

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
/**
 * Verifies that {@code Import.addFilterAndArguments} stores both the filter
 * class name and the comma-joined filter arguments in the configuration.
 */
@Test
public void testAddFilterAndArguments() throws IOException {
  Configuration conf = new Configuration();

  List<String> filterArgs = new ArrayList<String>();
  filterArgs.add("param1");
  filterArgs.add("param2");

  Import.addFilterAndArguments(conf, FilterBase.class, filterArgs);

  // The filter class is recorded by fully-qualified name, the args joined by commas.
  assertEquals("org.apache.hadoop.hbase.filter.FilterBase",
      conf.get(Import.FILTER_CLASS_CONF_KEY));
  assertEquals("param1,param2", conf.get(Import.FILTER_ARGS_CONF_KEY));
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:18,代码来源:TestImportExport.java

示例4: testAddFilterAndArguments

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
/**
 * Checks that {@code Import.addFilterAndArguments} publishes the filter class
 * and its argument list into the supplied Configuration under the Import keys.
 */
@Test
public void testAddFilterAndArguments() throws IOException {
  Configuration configuration = new Configuration();

  List<String> arguments = new ArrayList<String>();
  arguments.add("param1");
  arguments.add("param2");

  Import.addFilterAndArguments(configuration, FilterBase.class, arguments);

  String recordedClass = configuration.get(Import.FILTER_CLASS_CONF_KEY);
  String recordedArgs = configuration.get(Import.FILTER_ARGS_CONF_KEY);
  assertEquals("org.apache.hadoop.hbase.filter.FilterBase", recordedClass);
  assertEquals("param1,param2", recordedArgs);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:18,代码来源:TestImportExport.java

示例5: testFlushBeforeCompletingScanWithFilter

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
@Test
public void testFlushBeforeCompletingScanWithFilter() throws IOException, InterruptedException {
  // Flush just before the last expected cell, then have the filter answer
  // NEXT_ROW exactly once so the scan skips ahead after the mid-scan flush.
  final AtomicBoolean timeToGoNextRow = new AtomicBoolean(false);
  final int expectedSize = 2;
  MyListHook flushThenSkipRow = new MyListHook() {
    @Override
    public void hook(int currentSize) {
      if (currentSize != expectedSize - 1) {
        return; // trigger only once, right before the final cell
      }
      try {
        flushStore(store, id++);
        timeToGoNextRow.set(true);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  };
  FilterBase skipRowOnceFilter = new FilterBase() {
    @Override
    public Filter.ReturnCode filterCell(final Cell c) throws IOException {
      if (!timeToGoNextRow.get()) {
        return ReturnCode.INCLUDE;
      }
      // Consume the one-shot flag, then jump to the next row.
      timeToGoNextRow.set(false);
      return ReturnCode.NEXT_ROW;
    }
  };
  testFlushBeforeCompletingScan(flushThenSkipRow, skipRowOnceFilter, expectedSize);
}
 
开发者ID:apache,项目名称:hbase,代码行数:29,代码来源:TestHStore.java

示例6: testFlushBeforeCompletingScanWithFilterHint

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
@Test
public void testFlushBeforeCompletingScanWithFilterHint() throws IOException,
    InterruptedException {
  // Flush just before the last expected cell, then have the filter answer
  // SEEK_NEXT_USING_HINT exactly once; the hint points back at the current
  // cell, so the scanner re-seeks in place after the flush.
  final AtomicBoolean timeToGetHint = new AtomicBoolean(false);
  final int expectedSize = 2;
  MyListHook flushThenHint = new MyListHook() {
    @Override
    public void hook(int currentSize) {
      if (currentSize != expectedSize - 1) {
        return; // trigger only once, right before the final cell
      }
      try {
        flushStore(store, id++);
        timeToGetHint.set(true);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  };
  FilterBase hintOnceFilter = new FilterBase() {
    @Override
    public Filter.ReturnCode filterCell(final Cell c) throws IOException {
      if (!timeToGetHint.get()) {
        return Filter.ReturnCode.INCLUDE;
      }
      // Consume the one-shot flag, then request a seek via the hint below.
      timeToGetHint.set(false);
      return Filter.ReturnCode.SEEK_NEXT_USING_HINT;
    }

    @Override
    public Cell getNextCellHint(Cell currentCell) throws IOException {
      // Hinting the current cell keeps the scanner at its present position.
      return currentCell;
    }
  };
  testFlushBeforeCompletingScan(flushThenHint, hintOnceFilter, expectedSize);
}
 
开发者ID:apache,项目名称:hbase,代码行数:34,代码来源:TestHStore.java

示例7: testAddFilterAndArguments

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
/**
 * Ensures {@code Import.addFilterAndArguments} records the filter class name
 * and the comma-separated argument list under the expected configuration keys.
 */
@Test
public void testAddFilterAndArguments() throws IOException {
  Configuration conf = new Configuration();

  List<String> filterArguments = new ArrayList<>();
  filterArguments.add("param1");
  filterArguments.add("param2");

  Import.addFilterAndArguments(conf, FilterBase.class, filterArguments);

  String expectedFilterClass = "org.apache.hadoop.hbase.filter.FilterBase";
  assertEquals(expectedFilterClass, conf.get(Import.FILTER_CLASS_CONF_KEY));
  assertEquals("param1,param2", conf.get(Import.FILTER_ARGS_CONF_KEY));
}
 
开发者ID:apache,项目名称:hbase,代码行数:18,代码来源:TestImportExport.java

示例8: RegionScannerImpl

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
/**
 * Creates a scanner over this region for the given scan: records scan
 * parameters, assigns an MVCC read point, splits per-family store scanners
 * into an essential heap and a lazily-read joined heap, and optionally
 * bootstraps secondary-index based scanning from scan attributes.
 *
 * @param scan               client scan specification (filter, batch, rows, attributes)
 * @param additionalScanners extra scanners merged into the main heap; may be null
 * @param region             the region being scanned
 * @throws IOException if a store scanner cannot be opened
 */
RegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region)
    throws IOException {
  // DebugPrint.println("HRegionScanner.<init>");
  this.region = region;
  this.filter = scan.getFilter();
  this.batch = scan.getBatch();
  // An empty stop row is normalized to null, meaning "scan to the end".
  if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) {
    this.stopRow = null;
  } else {
    this.stopRow = scan.getStopRow();
  }
  // If we are doing a get, we want to be [startRow,endRow] normally
  // it is [startRow,endRow) and if startRow=endRow we get nothing.
  this.isScan = scan.isGetScan() ? -1 : 0;

  // synchronize on scannerReadPoints so that nobody calculates
  // getSmallestReadPoint, before scannerReadPoints is updated.
  IsolationLevel isolationLevel = scan.getIsolationLevel();
  synchronized(scannerReadPoints) {
    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      // This scan can read even uncommitted transactions
      this.readPt = Long.MAX_VALUE;
      MultiVersionConsistencyControl.setThreadReadPoint(this.readPt);
    } else {
      this.readPt = MultiVersionConsistencyControl.resetThreadReadPoint(mvcc);
    }
    scannerReadPoints.put(this, this.readPt);
  }

  // Here we separate all scanners into two lists - scanner that provide data required
  // by the filter to operate (scanners list) and all others (joinedScanners list).
  List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
  List<KeyValueScanner> joinedScanners = new ArrayList<KeyValueScanner>();
  if (additionalScanners != null) {
    scanners.addAll(additionalScanners);
  }

  for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
      scan.getFamilyMap().entrySet()) {
    Store store = stores.get(entry.getKey());
    KeyValueScanner scanner = store.getScanner(scan, entry.getValue());
    // A family goes to the joined (lazy) heap only when on-demand loading is
    // enabled and the filter declares the family non-essential.
    if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand()
      || FilterBase.isFamilyEssential(this.filter, entry.getKey())) {
      scanners.add(scanner);
    } else {
      joinedScanners.add(scanner);
    }
  }
  this.storeHeap = new KeyValueHeap(scanners, comparator);
  if (!joinedScanners.isEmpty()) {
    this.joinedHeap = new KeyValueHeap(joinedScanners, comparator);
  }

  // whether to use index
  // NOTE(review): scan attributes toggle index usage and bound the scan scale;
  // the exact semantics of MAX_SCAN_SCALE live in IndexConstants -- confirm there.
  byte[] tmpvalue=scan.getAttribute(IndexConstants.SCAN_WITH_INDEX);
  if(tmpvalue!=null){
    this.useIndex = Bytes.toBoolean(tmpvalue);
  }
  tmpvalue=scan.getAttribute(IndexConstants.MAX_SCAN_SCALE);
  float maxScale=IndexConstants.DEFAULT_MAX_SCAN_SCALE;
  if(tmpvalue!=null){
    maxScale = Bytes.toFloat(tmpvalue);
  }
  if (this.useIndex) {
    // Try to build an index scan plan; fall back to a plain scan when none applies.
    indexTree = ScanPreprocess.preprocess(this.region, scan.getFilter(), maxScale);
    if (indexTree!=null) {
      useIndex = true;
      long buildStartTime = System.currentTimeMillis();
      generateCandidateRows(scan);
      // Index read time excludes the merge and sort phases, which are tracked separately.
      this.indexReadTime = (System.currentTimeMillis() - buildStartTime) - this.indexMergeTime - this.indexSortTime;
    } else {
      useIndex = false;
      LOG.debug("skip using index");
    }
  }
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:77,代码来源:HRegion.java

示例9: RegionScannerImpl

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
/**
 * Creates a scanner over this region for the given scan: records scan
 * parameters, assigns an MVCC read point, and splits the per-family store
 * scanners into an essential heap and a lazily-read joined heap.
 *
 * @param scan               client scan specification (filter, batch, rows)
 * @param additionalScanners extra scanners merged into the main heap; may be null
 * @param region             the region being scanned
 * @throws IOException if a store scanner cannot be opened
 */
RegionScannerImpl(Scan scan, List<KeyValueScanner> additionalScanners, HRegion region)
    throws IOException {
  // DebugPrint.println("HRegionScanner.<init>");
  this.region = region;
  this.filter = scan.getFilter();
  this.batch = scan.getBatch();
  // An empty stop row is normalized to null, meaning "scan to the end".
  if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW)) {
    this.stopRow = null;
  } else {
    this.stopRow = scan.getStopRow();
  }
  // If we are doing a get, we want to be [startRow,endRow] normally
  // it is [startRow,endRow) and if startRow=endRow we get nothing.
  this.isScan = scan.isGetScan() ? -1 : 0;

  // synchronize on scannerReadPoints so that nobody calculates
  // getSmallestReadPoint, before scannerReadPoints is updated.
  IsolationLevel isolationLevel = scan.getIsolationLevel();
  synchronized(scannerReadPoints) {
    if (isolationLevel == IsolationLevel.READ_UNCOMMITTED) {
      // This scan can read even uncommitted transactions
      this.readPt = Long.MAX_VALUE;
      MultiVersionConsistencyControl.setThreadReadPoint(this.readPt);
    } else {
      this.readPt = MultiVersionConsistencyControl.resetThreadReadPoint(mvcc);
    }
    scannerReadPoints.put(this, this.readPt);
  }

  // Here we separate all scanners into two lists - scanner that provide data required
  // by the filter to operate (scanners list) and all others (joinedScanners list).
  List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();
  List<KeyValueScanner> joinedScanners = new ArrayList<KeyValueScanner>();
  if (additionalScanners != null) {
    scanners.addAll(additionalScanners);
  }

  for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
      scan.getFamilyMap().entrySet()) {
    Store store = stores.get(entry.getKey());
    KeyValueScanner scanner = store.getScanner(scan, entry.getValue());
    // A family goes to the joined (lazy) heap only when on-demand loading is
    // enabled and the filter declares the family non-essential.
    if (this.filter == null || !scan.doLoadColumnFamiliesOnDemand()
      || FilterBase.isFamilyEssential(this.filter, entry.getKey())) {
      scanners.add(scanner);
    } else {
      joinedScanners.add(scanner);
    }
  }
  this.storeHeap = new KeyValueHeap(scanners, comparator);
  if (!joinedScanners.isEmpty()) {
    this.joinedHeap = new KeyValueHeap(joinedScanners, comparator);
  }
}
 
开发者ID:zwqjsj0404,项目名称:HBase-Research,代码行数:54,代码来源:HRegion.java

示例10: wrapWithDeleteFilter

import org.apache.hadoop.hbase.filter.FilterBase; //导入依赖的package包/类
/**
 * Attaches a filter that applies the transaction's pending deletes to every
 * row of the scan. If the scan already has a filter, the delete filter is
 * placed ahead of it in a FilterList; otherwise it becomes the scan's filter.
 *
 * @param scan  the scan to wrap; mutated in place and returned
 * @param state transaction state supplying the pending deletes
 * @return the same scan instance, with the delete filter installed
 */
private Scan wrapWithDeleteFilter(final Scan scan, final TransactionState state) {
    FilterBase deleteFilter = new FilterBase() {

        // true once applyDeletes has removed every KeyValue of the current row
        private boolean allDeleted = false;

        @Override
        public void reset() {
            allDeleted = false;
        }

        @Override
        public boolean hasFilterRow() {
            return true;
        }

        @Override
        public void filterRow(final List<KeyValue> kvs) {
            // Strip deleted cells within the scan's time range, then note
            // whether the whole row was consumed.
            state.applyDeletes(kvs, scan.getTimeRange().getMin(), scan.getTimeRange().getMax());
            allDeleted = kvs.isEmpty();
        }

        @Override
        public boolean filterRow() {
            return allDeleted;
        }

        @Override
        public void write(final DataOutput out) throws IOException {
            // stateless on the wire: nothing to serialize
        }

        @Override
        public void readFields(final DataInput in) throws IOException {
            // stateless on the wire: nothing to deserialize
        }
    };

    if (scan.getFilter() == null) {
        scan.setFilter(deleteFilter);
    } else {
        // Run the delete filter first, then the scan's original filter.
        scan.setFilter(new FilterList(Arrays.asList(deleteFilter, scan.getFilter())));
    }
    return scan;
}
 
开发者ID:mayanhui,项目名称:hbase-secondary-index,代码行数:47,代码来源:TransactionalRegion.java


注:本文中的org.apache.hadoop.hbase.filter.FilterBase类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。