This article collects typical usage examples of the Java class org.apache.hadoop.hbase.filter.KeyOnlyFilter. If you are unsure what exactly the KeyOnlyFilter class does or how to use it, the curated class code examples below should help.
The KeyOnlyFilter class belongs to the org.apache.hadoop.hbase.filter package. Fifteen code examples of the class are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
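Before the numbered examples, here is a minimal, hedged sketch of the pattern most of them share: attach a KeyOnlyFilter to a Scan so the server strips cell values and returns only keys. The class name and table handle below are illustrative placeholders, not taken from any of the examples.
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyOnlyScanSketch {
  // Print every row key in the given table without transferring cell values.
  public static void printRowKeys(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setFilter(new KeyOnlyFilter()); // keep keys, strip values server-side
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        System.out.println(Bytes.toString(result.getRow()));
      }
    }
  }
}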
Example 1: doAction
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);
    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
Example 2: collectReportMetadataViaDirectScan
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
/**
 * Note that outputting invalid column metadata to an HBase table is intended to make for
 * easy implementation in a distributed mapreduce version of this procedure.
 *
 * @throws IOException if a remote or network exception occurs
 */
private void collectReportMetadataViaDirectScan() throws IOException {
  // perform full scan (with KeyOnlyFilter(true) if summary report)
  Scan scan = new Scan();
  if (!verboseReport && !reportType.equals(ReportType.VALUE)) {
    scan.setFilter(new KeyOnlyFilter(true));
  }
  if (includeAllCells) {
    scan.setMaxVersions();
  }
  if (sourceColFamily != null) {
    scan.addFamily(sourceColFamily);
  }
  try (ResultScanner rows = sourceTable.getScanner(scan)) {
    for (Result row : rows) {
      doSourceRowProcessing(row);
    }
  }
}
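Because a summary report only needs to know which columns exist and how large their values are, KeyOnlyFilter(true) is used here: it strips each cell's value server-side and replaces it with the value's length, so the scan transfers far less data than a full-value scan.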
Example 3: doAction
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
protected boolean doAction() throws Exception {
  ResultScanner rs = null;
  try {
    Scan s = new Scan();
    s.setBatch(2);
    s.addFamily(FAMILY);
    s.setFilter(new KeyOnlyFilter());
    s.setMaxVersions(1);
    rs = table.getScanner(s);
    Result result = rs.next();
    return result != null && result.size() > 0;
  } finally {
    if (rs != null) {
      rs.close();
    }
  }
}
Example 4: isFilterSupported
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
public FilterSupportStatus isFilterSupported(
    FilterAdapterContext context, KeyOnlyFilter filter) {
  // We don't support replacing the value of a stripped cell with
  // its length (a 4-byte big-endian int). The KeyOnlyFilter supports this
  // via a constructor parameter (lenAsVal) that is not exposed through a getter.
  // In order to find out if this constructor parameter was set,
  // we perform a test transformation. If the test transformation
  // has a cell value length that is not 0 bytes, we know the
  // unsupported constructor param was passed:
  if (filter.transformCell(TEST_CELL).getValueLength() != 0) {
    return FilterSupportStatus.newNotSupported(
        "KeyOnlyFilters with lenAsVal = true are not supported");
  }
  return FilterSupportStatus.SUPPORTED;
}
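TEST_CELL is a constant defined elsewhere in the adapter and not shown in this article; any cell with a non-empty value would serve, because a plain KeyOnlyFilter transforms it to a zero-length value while KeyOnlyFilter(true) replaces the value with its 4-byte length. A hedged sketch of how such a probe cell might look (names are illustrative only, not the adapter's real definition):
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

class KeyOnlyProbe {
  // Hypothetical stand-in for the adapter's TEST_CELL; the real definition is not shown above.
  // The value must be non-empty so that lenAsVal = true produces a non-zero value length.
  static final Cell TEST_CELL = new KeyValue(
      Bytes.toBytes("probe-row"), Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("probe-value"));
}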
Example 5: isReallyEmptyRegion
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
public static boolean isReallyEmptyRegion(HConnection connection,
    String tableName, HRegionInfo regionInfo) throws IOException {
  boolean emptyRegion = false;
  // verify the region is really empty by scanning for records
  try (HTableInterface table = connection.getTable(tableName)) {
    Scan scan = new Scan(regionInfo.getStartKey(), regionInfo.getEndKey());
    FilterList filterList = new FilterList();
    filterList.addFilter(new KeyOnlyFilter());
    filterList.addFilter(new FirstKeyOnlyFilter());
    scan.setFilter(filterList);
    scan.setCacheBlocks(false);
    scan.setSmall(true);
    scan.setCaching(1);
    try (ResultScanner scanner = table.getScanner(scan)) {
      if (scanner.next() == null) emptyRegion = true;
    }
  }
  return emptyRegion;
}
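The FilterList here uses its default MUST_PASS_ALL operator, so both filters apply: FirstKeyOnlyFilter limits each row to its first cell and KeyOnlyFilter strips that cell's value, which together with setSmall(true) and setCaching(1) makes this emptiness check about as cheap as a scan can be.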
Example 6: run
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
public void run() {
  try (HTableInterface table = connection.getTable(tableName.getBytes())) {
    // Do not use Get, so as not to increase the read-request-count metric.
    // Use Scan instead.
    Scan scan = new Scan("".getBytes(), "".getBytes());
    FilterList filterList = new FilterList();
    filterList.addFilter(new KeyOnlyFilter());
    filterList.addFilter(new FirstKeyOnlyFilter());
    scan.setFilter(filterList);
    //noinspection EmptyTryBlock
    try (ResultScanner ignored = table.getScanner(scan)) {
    }
    return;
  } catch (IOException ignore) {
  }
  clean(tableName);
}
Example 7: getByScan
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
private Result getByScan(String path, byte[] family, byte[] column) throws IOException {
  byte[] startRow = Bytes.toBytes(path);
  byte[] endRow = plusZero(startRow);
  Scan scan = new Scan(startRow, endRow);
  if (family == null || column == null) {
    scan.setFilter(new KeyOnlyFilter());
  } else {
    scan.addColumn(family, column);
  }
  HTableInterface table = getConnection().getTable(getAllInOneTableName());
  try {
    ResultScanner scanner = table.getScanner(scan);
    Result result = null;
    for (Result r : scanner) {
      result = r;
    }
    return result == null || result.isEmpty() ? null : result;
  } finally {
    IOUtils.closeQuietly(table);
  }
}
Example 8: getEdgeCount
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
protected long getEdgeCount(final Vertex vertex) {
  long count = 0;
  Validate.notNull(vertex, "vertex shall always not be null");
  EdgeIterable edgeIt = getEdgeIterable(vertex, new GenScanStrategy() {
    @Override
    public Scan getScan() {
      Scan scan = new Scan();
      scan.setStartRow(Bytes.toBytes(vertex.getId() + HBaseGraphConstants.HBASE_GRAPH_TABLE_EDGE_DELIMITER_1));
      scan.setStopRow(Bytes.toBytes(vertex.getId() + "~"));
      scan.setFilter(new KeyOnlyFilter());
      return scan;
    }
  });
  Iterator<com.tinkerpop.blueprints.Edge> edgeIte = edgeIt.iterator();
  while (edgeIte.hasNext()) {
    edgeIte.next();
    count++;
  }
  return count;
}
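The scan range is built from the vertex id plus the edge delimiter as the start row and the vertex id plus "~" as the stop row; assuming the delimiter sorts below "~" (0x7E), this bounds the scan to that vertex's edge rows, and KeyOnlyFilter keeps the per-edge payload to just the key while the loop counts the results.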
Example 9: testKeyOnlyFilter
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Test
public void testKeyOnlyFilter() throws Exception {
  byte[] TABLE = Bytes.toBytes("testKeyOnlyFilter");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = {
      Bytes.toBytes("col0-<d2v1>-<d3v2>"), Bytes.toBytes("col1-<d2v1>-<d3v2>"),
      Bytes.toBytes("col2-<d2v1>-<d3v2>"), Bytes.toBytes("col3-<d2v1>-<d3v2>"),
      Bytes.toBytes("col4-<d2v1>-<d3v2>"), Bytes.toBytes("col5-<d2v1>-<d3v2>"),
      Bytes.toBytes("col6-<d2v1>-<d3v2>"), Bytes.toBytes("col7-<d2v1>-<d3v2>"),
      Bytes.toBytes("col8-<d2v1>-<d3v2>"), Bytes.toBytes("col9-<d2v1>-<d3v2>")
  };
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.setDurability(Durability.SKIP_WAL);
    put.add(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertEquals(result.rawCells()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(CellUtil.cloneValue(result.rawCells()[0])), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
}
Example 10: testKeyOnlyFilterWithReverseScan
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Test
public void testKeyOnlyFilterWithReverseScan() throws Exception {
  TableName TABLE = TableName.valueOf("testKeyOnlyFilterWithReverseScan");
  Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = { Bytes.toBytes("col0-<d2v1>-<d3v2>"),
      Bytes.toBytes("col1-<d2v1>-<d3v2>"),
      Bytes.toBytes("col2-<d2v1>-<d3v2>"),
      Bytes.toBytes("col3-<d2v1>-<d3v2>"),
      Bytes.toBytes("col4-<d2v1>-<d3v2>"),
      Bytes.toBytes("col5-<d2v1>-<d3v2>"),
      Bytes.toBytes("col6-<d2v1>-<d3v2>"),
      Bytes.toBytes("col7-<d2v1>-<d3v2>"),
      Bytes.toBytes("col8-<d2v1>-<d3v2>"),
      Bytes.toBytes("col9-<d2v1>-<d3v2>") };
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.add(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.setReversed(true);
  scan.addFamily(FAMILY);
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertEquals(result.raw()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(result.raw()[0].getValue()), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
  ht.close();
}
Example 11: createSubmittableJob
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
Job createSubmittableJob(final String[] args) throws IOException {
  Configuration configFromArgs = parseArguments(args);
  if (configFromArgs == null || sourceTableNameString == null) {
    return null;
  }
  getConf().addResource(configFromArgs);
  getConf().setBoolean(Repository.MAP_SPECULATIVE_CONF_KEY, true); // no redundant processing
  Job job = Job.getInstance(
      getConf(), getConf().get(Repository.JOB_NAME_CONF_KEY, sourceTableNameString));
  TableMapReduceUtil.addDependencyJars(job);
  Scan scan = new Scan();
  // note that the user can override scan row-caching by setting TableInputFormat.SCAN_CACHEDROWS
  scan.setCaching(getConf().getInt(TableInputFormat.SCAN_CACHEDROWS, 500));
  scan.setCacheBlocks(false); // should be false for MapReduce jobs
  if (!verboseReport && !reportType.equals(ReportType.VALUE)) {
    scan.setFilter(new KeyOnlyFilter(true));
  }
  if (includeAllCells) {
    scan.setMaxVersions();
  }
  if (sourceColFamily != null) {
    scan.addFamily(sourceColFamily);
  }
  TableMapReduceUtil.initTableMapperJob(sourceTableNameString,
      scan,
      ColumnInvalidityReportMapper.class,
      null, // mapper output key is null
      null, // mapper output value is null
      job);
  job.setOutputFormatClass(NullOutputFormat.class); // no Mapper output, no Reducer
  return job;
}
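As in Example 2, KeyOnlyFilter(true) is applied only when neither a verbose report nor a VALUE report is requested, so the mappers receive keys and value lengths instead of full cell values; disabling block caching and raising the scan caching are the usual settings for MapReduce table scans.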
Example 12: createSubmittableJob
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
Job createSubmittableJob(final String[] args) throws IOException {
  if (!parseArguments(args)) {
    return null;
  }
  getConf().setBoolean(Repository.MAP_SPECULATIVE_CONF_KEY, true); // no redundant processing
  getConf().set(Repository.TABLE_NAME_CONF_KEY, sourceTableNameString);
  Job job = Job.getInstance(
      getConf(), getConf().get(Repository.JOB_NAME_CONF_KEY, sourceTableNameString));
  TableMapReduceUtil.addDependencyJars(job);
  Scan scan = new Scan();
  // note that the user can override scan row-caching by setting TableInputFormat.SCAN_CACHEDROWS
  scan.setCaching(getConf().getInt(TableInputFormat.SCAN_CACHEDROWS, 500));
  scan.setCacheBlocks(false); // should be false for scanning in MapReduce jobs
  scan.setFilter(new KeyOnlyFilter(true));
  if (includeAllCells) {
    scan.setMaxVersions();
  }
  TableMapReduceUtil.initTableMapperJob(
      sourceTableNameString,
      scan,
      ColumnDiscoveryMapper.class,
      null, // mapper output key is null
      null, // mapper output value is null
      job);
  job.setOutputFormatClass(NullOutputFormat.class); // no Mapper output, no Reducer
  return job;
}
Example 13: run
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
public void run() {
  finished = false;
  running = true;
  try (HTable table = new HTable(conf, tableName)) {
    Scan scan = new Scan();
    scan.setCacheBlocks(false);
    scan.setMaxVersions(1);
    scan.setCaching(1000);
    scan.setStartRow(region.getStartKey());
    scan.setStopRow(region.getEndKey());
    FilterList flist = new FilterList();
    flist.addFilter(new KeyOnlyFilter());
    flist.addFilter(new FirstKeyOnlyFilter());
    scan.setFilter(flist);
    try (ResultScanner rs = table.getScanner(scan)) {
      while (rs.next() != null) {
        count++;
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  } finally {
    finished = true;
    running = false;
  }
}
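Combining KeyOnlyFilter with FirstKeyOnlyFilter means each row in the region contributes a single key-only cell, so this thread counts the rows between the region's start and end keys while transferring almost no data; setCaching(1000) batches those tiny results into fewer RPCs.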
Example 14: testKeyOnlyFilter
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Test
public void testKeyOnlyFilter() throws Exception {
  byte[] TABLE = Bytes.toBytes("testKeyOnlyFilter");
  HTable ht = TEST_UTIL.createTable(TABLE, FAMILY);
  byte[][] ROWS = makeN(ROW, 10);
  byte[][] QUALIFIERS = {
      Bytes.toBytes("col0-<d2v1>-<d3v2>"), Bytes.toBytes("col1-<d2v1>-<d3v2>"),
      Bytes.toBytes("col2-<d2v1>-<d3v2>"), Bytes.toBytes("col3-<d2v1>-<d3v2>"),
      Bytes.toBytes("col4-<d2v1>-<d3v2>"), Bytes.toBytes("col5-<d2v1>-<d3v2>"),
      Bytes.toBytes("col6-<d2v1>-<d3v2>"), Bytes.toBytes("col7-<d2v1>-<d3v2>"),
      Bytes.toBytes("col8-<d2v1>-<d3v2>"), Bytes.toBytes("col9-<d2v1>-<d3v2>")
  };
  for (int i = 0; i < 10; i++) {
    Put put = new Put(ROWS[i]);
    put.setWriteToWAL(false);
    put.add(FAMILY, QUALIFIERS[i], VALUE);
    ht.put(put);
  }
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  Filter filter = new KeyOnlyFilter(true);
  scan.setFilter(filter);
  ResultScanner scanner = ht.getScanner(scan);
  int count = 0;
  for (Result result : scanner) {
    assertEquals(result.size(), 1);
    assertEquals(result.raw()[0].getValueLength(), Bytes.SIZEOF_INT);
    assertEquals(Bytes.toInt(result.raw()[0].getValue()), VALUE.length);
    count++;
  }
  assertEquals(count, 10);
  scanner.close();
}
Example 15: adapt
import org.apache.hadoop.hbase.filter.KeyOnlyFilter; // import the required package/class
@Override
public RowFilter adapt(FilterAdapterContext context, KeyOnlyFilter filter)
    throws IOException {
  return RowFilter.newBuilder()
      .setStripValueTransformer(true)
      .build();
}
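This adapter can unconditionally translate KeyOnlyFilter into a strip-value transformer because the isFilterSupported check shown in Example 4 has already rejected any KeyOnlyFilter constructed with lenAsVal = true.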