

Java KeyOnlyFilter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.KeyOnlyFilter. If you are wondering what the KeyOnlyFilter class is for, how to use it, or what real-world usage looks like, the curated code examples below should help.


The KeyOnlyFilter class belongs to the org.apache.hadoop.hbase.filter package. Fifteen code examples of the class are presented below, sorted by popularity by default.
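Before diving into the examples, here is a minimal, self-contained usage sketch, not taken from any of the projects below and assuming an HBase 1.x-style client; the table name "my_table" and column family "cf" are hypothetical placeholders. It shows the two constructors the examples rely on: the no-argument KeyOnlyFilter() strips every cell's value, while KeyOnlyFilter(true) replaces each value with the original value's length encoded as a 4-byte integer.

import java.io.IOException;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyOnlyFilterSketch {
  public static void main(String[] args) throws IOException {
    // "my_table" and "cf" are hypothetical; substitute your own table and family.
    try (Connection connection = ConnectionFactory.createConnection();
         Table table = connection.getTable(TableName.valueOf("my_table"))) {

      // Default form: each returned cell keeps its row key, family, qualifier
      // and timestamp, but the value is stripped to zero bytes.
      Scan keysOnly = new Scan();
      keysOnly.addFamily(Bytes.toBytes("cf"));
      keysOnly.setFilter(new KeyOnlyFilter());
      try (ResultScanner scanner = table.getScanner(keysOnly)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toString(result.getRow()));
        }
      }

      // lenAsVal form: the value is replaced with the original value's length,
      // encoded as a 4-byte integer (compare examples 9, 10 and 14 below).
      Scan lengthsOnly = new Scan();
      lengthsOnly.addFamily(Bytes.toBytes("cf"));
      lengthsOnly.setFilter(new KeyOnlyFilter(true));
      try (ResultScanner scanner = table.getScanner(lengthsOnly)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0])));
        }
      }
    }
  }
}

Several of the examples below also combine KeyOnlyFilter with FirstKeyOnlyFilter in a FilterList, a common trick for checking whether any rows exist while transferring as little data as possible.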

Example 1: doAction

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);

    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: IntegrationTestMTTR.java

Example 2: collectReportMetadataViaDirectScan

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
/**
 * Note that outputting invalid column metadata to an HBase table is intended to make for
 * easy implementation in a distributed mapreduce version of this procedure.
 *
 * @throws IOException if a remote or network exception occurs
 */
private void collectReportMetadataViaDirectScan() throws IOException {
  // perform full scan (w/ KeyOnlyFilter(true) if summary report)
  Scan scan = new Scan();
  if (!verboseReport && !reportType.equals(ReportType.VALUE)) {
    scan.setFilter(new KeyOnlyFilter(true));
  }
  if (includeAllCells) {
    scan.setMaxVersions();
  }
  if (sourceColFamily != null) {
    scan.addFamily(sourceColFamily);
  }
  try (ResultScanner rows = sourceTable.getScanner(scan)) {
    for (Result row : rows) {
      doSourceRowProcessing(row);
    }
  }
}
 
Developer: dvimont, Project: ColumnManagerForHBase, Lines: 25, Source: ColumnInvalidityReport.java

Example 3: doAction

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);

    rs = table.getScanner(s);
    Result result = rs.next();
    return rs != null && result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 20, Source: IntegrationTestMTTR.java

Example 4: isFilterSupported

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
public FilterSupportStatus isFilterSupported(
    FilterAdapterContext context, KeyOnlyFilter filter) {
  // We don't support replacing the value of a stripped cell with its length
  // (encoded as a 4-byte big-endian integer). The KeyOnlyFilter supports this
  // via a constructor parameter that is not exposed through any getter.
  // In order to find out whether this constructor parameter was set,
  // we perform a test transformation. If the test transformation
  // produces a cell whose value length is not 0 bytes, we know the
  // unsupported constructor param was passed:
  if (filter.transformCell(TEST_CELL).getValueLength() != 0) {
    return FilterSupportStatus.newNotSupported(
        "KeyOnlyFilters with lenAsVal = true are not supported");
  }
  return FilterSupportStatus.SUPPORTED;
}
 
Developer: dmmcerlean, Project: cloud-bigtable-client, Lines: 17, Source: KeyOnlyFilterAdapter.java

Example 5: isReallyEmptyRegion

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
public static boolean isReallyEmptyRegion(HConnection connection,
    String tableName, HRegionInfo regionInfo) throws IOException {
    boolean emptyRegion = false;
    // verify really empty region by scanning records
    try (HTableInterface table = connection.getTable(tableName)) {
        Scan scan = new Scan(regionInfo.getStartKey(), regionInfo.getEndKey());
        FilterList filterList = new FilterList();
        filterList.addFilter(new KeyOnlyFilter());
        filterList.addFilter(new FirstKeyOnlyFilter());
        scan.setFilter(filterList);
        scan.setCacheBlocks(false);
        scan.setSmall(true);
        scan.setCaching(1);

        try (ResultScanner scanner = table.getScanner(scan)) {
            if (scanner.next() == null) emptyRegion = true;
        }
    }
    return emptyRegion;
}
 
Developer: kakao, Project: hbase-tools, Lines: 21, Source: CommandAdapter.java

Example 6: run

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
public void run() {
    try (HTableInterface table = connection.getTable(tableName.getBytes())) {
        // Use Scan rather than Get, so as not to increase the
        // read request count metric.
        Scan scan = new Scan("".getBytes(), "".getBytes());
        FilterList filterList = new FilterList();
        filterList.addFilter(new KeyOnlyFilter());
        filterList.addFilter(new FirstKeyOnlyFilter());
        scan.setFilter(filterList);
        //noinspection EmptyTryBlock
        try(ResultScanner ignored = table.getScanner(scan)) {
        }
        return;
    } catch (IOException ignore) {
    }

    clean(tableName);
}
 
Developer: kakao, Project: hbase-tools, Lines: 20, Source: RegionLocationCleaner.java

Example 7: getByScan

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
private Result getByScan(String path, byte[] family, byte[] column) throws IOException {
    byte[] startRow = Bytes.toBytes(path);
    byte[] endRow = plusZero(startRow);

    Scan scan = new Scan(startRow, endRow);
    if (family == null || column == null) {
        scan.setFilter(new KeyOnlyFilter());
    } else {
        scan.addColumn(family, column);
    }

    HTableInterface table = getConnection().getTable(getAllInOneTableName());
    try {
        ResultScanner scanner = table.getScanner(scan);
        Result result = null;
        for (Result r : scanner) {
            result = r;
        }
        return result == null || result.isEmpty() ? null : result;
    } finally {
        IOUtils.closeQuietly(table);
    }
}
 
Developer: KylinOLAP, Project: Kylin, Lines: 24, Source: HBaseResourceStore.java

Example 8: getEdgeCount

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
protected long getEdgeCount(final Vertex vertex) {
  long count = 0;
  Validate.notNull(vertex, "vertex shall always not be null");
  EdgeIterable edgeIt = getEdgeIterable(vertex, new GenScanStrategy() {
    @Override
    public Scan getScan() {
      Scan scan = new Scan();
      scan.setStartRow(Bytes.toBytes(vertex.getId() + HBaseGraphConstants.HBASE_GRAPH_TABLE_EDGE_DELIMITER_1));
      scan.setStopRow(Bytes.toBytes(vertex.getId() + "~"));
      scan.setFilter(new KeyOnlyFilter());
      return scan;
    }
  });
  
  Iterator<com.tinkerpop.blueprints.Edge> edgeIte = edgeIt.iterator();
  while(edgeIte.hasNext()) {
    edgeIte.next();
    count++;
  }
  return count;
}
 
Developer: trendmicro, Project: HGraph, Lines: 22, Source: Graph.java

Example 9: testKeyOnlyFilter

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Test
public void testKeyOnlyFilter() throws Exception {
  byte [] TABLE = Bytes.toBytes("testKeyOnlyFilter");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte [][] ROWS = makeN(ROW, 10);
  byte [][] QUALIFIERS = {
      Bytes.toBytes("col0-<d2v1>-<d3v2>"), Bytes.toBytes("col1-<d2v1>-<d3v2>"),
      Bytes.toBytes("col2-<d2v1>-<d3v2>"), Bytes.toBytes("col3-<d2v1>-<d3v2>"),
      Bytes.toBytes("col4-<d2v1>-<d3v2>"), Bytes.toBytes("col5-<d2v1>-<d3v2>"),
      Bytes.toBytes("col6-<d2v1>-<d3v2>"), Bytes.toBytes("col7-<d2v1>-<d3v2>"),
      Bytes.toBytes("col8-<d2v1>-<d3v2>"), Bytes.toBytes("col9-<d2v1>-<d3v2>")
  };
  for(int i=0;i<10;i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    put.add(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertEquals(result.rawCells()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0])), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TestFromClientSide.java

Example 10: testKeyOnlyFilterWithReverseScan

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Test
public void testKeyOnlyFilterWithReverseScan() throws Exception {
  TableName TABLE = TableName.valueOf("testKeyOnlyFilterWithReverseScan");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = { Bytes.toBytes("col0-<d2v1>-<d3v2>"),
      Bytes.toBytes("col1-<d2v1>-<d3v2>"),
      Bytes.toBytes("col2-<d2v1>-<d3v2>"),
      Bytes.toBytes("col3-<d2v1>-<d3v2>"),
      Bytes.toBytes("col4-<d2v1>-<d3v2>"),
      Bytes.toBytes("col5-<d2v1>-<d3v2>"),
      Bytes.toBytes("col6-<d2v1>-<d3v2>"),
      Bytes.toBytes("col7-<d2v1>-<d3v2>"),
      Bytes.toBytes("col8-<d2v1>-<d3v2>"),
      Bytes.toBytes("col9-<d2v1>-<d3v2>") };
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.add(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.setReversed(true);
  scan.addFamily(FAMILY);
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertEquals(result.raw()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(result.raw()[0].getValue()), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
  ht.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: TestFromClientSide.java

Example 11: createSubmittableJob

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
Job createSubmittableJob(final String[] args) throws IOException {
  Configuration configFromArgs = parseArguments(args);
  if (configFromArgs == null || sourceTableNameString == null) {
    return null;
  }
  getConf().addResource(configFromArgs);
  getConf().setBoolean(Repository.MAP_SPECULATIVE_CONF_KEY, true); // no redundant processing

  Job job = Job.getInstance(
          getConf(), getConf().get(Repository.JOB_NAME_CONF_KEY, sourceTableNameString));
  TableMapReduceUtil.addDependencyJars(job);
  Scan scan = new Scan();
  // note that user can override scan row-caching by setting TableInputFormat.SCAN_CACHEDROWS
  scan.setCaching(getConf().getInt(TableInputFormat.SCAN_CACHEDROWS, 500));
  scan.setCacheBlocks(false);  // should be false for MapReduce jobs

  if (!verboseReport && !reportType.equals(ReportType.VALUE)) {
    scan.setFilter(new KeyOnlyFilter(true));
  }
  if (includeAllCells) {
    scan.setMaxVersions();
  }
  if (sourceColFamily != null) {
    scan.addFamily(sourceColFamily);
  }
  TableMapReduceUtil.initTableMapperJob(sourceTableNameString,
          scan,
          ColumnInvalidityReportMapper.class,
          null,  // mapper output key is null
          null,  // mapper output value is null
          job);
  job.setOutputFormatClass(NullOutputFormat.class);   // no Mapper output, no Reducer

  return job;
}
 
Developer: dvimont, Project: ColumnManagerForHBase, Lines: 36, Source: ColumnInvalidityReport.java

Example 12: createSubmittableJob

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
Job createSubmittableJob(final String[] args) throws IOException {
  if (!parseArguments(args)) {
    return null;
  }
  getConf().setBoolean(Repository.MAP_SPECULATIVE_CONF_KEY, true); // no redundant processing
  getConf().set(Repository.TABLE_NAME_CONF_KEY, sourceTableNameString);
  Job job = Job.getInstance(
          getConf(), getConf().get(Repository.JOB_NAME_CONF_KEY, sourceTableNameString));
  TableMapReduceUtil.addDependencyJars(job);
  Scan scan = new Scan();
  // note that user can override scan row-caching by setting TableInputFormat.SCAN_CACHEDROWS
  scan.setCaching(getConf().getInt(TableInputFormat.SCAN_CACHEDROWS, 500));
  scan.setCacheBlocks(false);  // should be false for scanning in MapReduce jobs
  scan.setFilter(new KeyOnlyFilter(true));
  if (includeAllCells) {
    scan.setMaxVersions();
  }
  TableMapReduceUtil.initTableMapperJob(
          sourceTableNameString,
          scan,
          ColumnDiscoveryMapper.class,
          null,  // mapper output key is null
          null,  // mapper output value is null
          job);
  job.setOutputFormatClass(NullOutputFormat.class);   // no Mapper output, no Reducer

  return job;
}
 
Developer: dvimont, Project: ColumnManagerForHBase, Lines: 29, Source: ColumnDiscoveryTool.java

Example 13: run

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
public void run() {
  finished = false;
  running = true;
  try {
    HTable table = new HTable(conf, tableName);
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    scan.setMaxVersions(1);
    scan.setCaching(1000);
    scan.setStartRow(region.getStartKey());
    scan.setStopRow(region.getEndKey());

    FilterList flist = new FilterList();
    flist.addFilter(new KeyOnlyFilter());
    flist.addFilter(new FirstKeyOnlyFilter());
    scan.setFilter(flist);

    ResultScanner rs = table.getScanner(scan);
    while (rs.next() != null) {
      count++;
    }

  } catch (IOException e) {
    e.printStackTrace();
  } finally {
    finished = true;
    running = false;
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 31, Source: ParallelCount.java

Example 14: testKeyOnlyFilter

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Test
public void testKeyOnlyFilter() throws Exception {
  byte [] TABLE = Bytes.toBytes("testKeyOnlyFilter");
  HTable ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte [][] ROWS = makeN(ROW, 10);
  byte [][] QUALIFIERS = {
      Bytes.toBytes("col0-<d2v1>-<d3v2>"), Bytes.toBytes("col1-<d2v1>-<d3v2>"),
      Bytes.toBytes("col2-<d2v1>-<d3v2>"), Bytes.toBytes("col3-<d2v1>-<d3v2>"),
      Bytes.toBytes("col4-<d2v1>-<d3v2>"), Bytes.toBytes("col5-<d2v1>-<d3v2>"),
      Bytes.toBytes("col6-<d2v1>-<d3v2>"), Bytes.toBytes("col7-<d2v1>-<d3v2>"),
      Bytes.toBytes("col8-<d2v1>-<d3v2>"), Bytes.toBytes("col9-<d2v1>-<d3v2>")
  };
  for(int i=0;i<10;i++) {
    Put put = new Put(ROWS[i]);
    put.setWriteToWAL(false);
    put.add(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertEquals(result.raw()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(result.raw()[0].getValue()), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 34, Source: TestFromClientSide.java

Example 15: adapt

import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
public RowFilter adapt(FilterAdapterContext context, KeyOnlyFilter filter)
    throws IOException {
  return RowFilter.newBuilder()
      .setStripValueTransformer(true)
      .build();
}
 
Developer: dmmcerlean, Project: cloud-bigtable-client, Lines: 8, Source: KeyOnlyFilterAdapter.java


Note: The org.apache.hadoop.hbase.filter.KeyOnlyFilter class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.