This article collects typical usage examples of the Java class org.apache.hadoop.hbase.KeepDeletedCells. If you are wondering what KeepDeletedCells is, what it is for, or how to use it, the curated examples below should help.
The KeepDeletedCells class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, sorted by popularity by default.
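Before the examples, it helps to recall the three values the enum defines. The following is a minimal sketch of setting KeepDeletedCells on a column family with the HBase 1.x API that most examples below use; the family name "cf" is made up for illustration:
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeepDeletedCells;

// FALSE: deleted cells may be purged at the next major compaction (the default).
// TRUE:  deleted cells are retained until removed by other means, such as TTL or VERSIONS.
// TTL:   deleted cells are retained, but only until the column family's TTL expires.
HColumnDescriptor cf = new HColumnDescriptor("cf"); // hypothetical family name
cf.setKeepDeletedCells(KeepDeletedCells.TTL);
System.out.println(cf.getKeepDeletedCells()); // prints TTL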
Example 1: ScanInfo
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
/**
* @param conf the store's configuration
* @param family Name of this store's column family
* @param minVersions Store's MIN_VERSIONS setting
* @param maxVersions Store's VERSIONS setting
* @param ttl Store's TTL (in ms)
* @param keepDeletedCells Store's keepDeletedCells setting
* @param timeToPurgeDeletes duration in ms after which a delete marker can
* be purged during a major compaction
* @param comparator The store's comparator
*/
public ScanInfo(final Configuration conf, final byte[] family, final int minVersions,
final int maxVersions, final long ttl, final KeepDeletedCells keepDeletedCells,
final long timeToPurgeDeletes, final KVComparator comparator) {
this.family = family;
this.minVersions = minVersions;
this.maxVersions = maxVersions;
this.ttl = ttl;
this.keepDeletedCells = keepDeletedCells;
this.timeToPurgeDeletes = timeToPurgeDeletes;
this.comparator = comparator;
this.tableMaxRowSize =
conf.getLong(HConstants.TABLE_MAX_ROWSIZE_KEY, HConstants.TABLE_MAX_ROWSIZE_DEFAULT);
this.usePread = conf.getBoolean("hbase.storescanner.use.pread", false);
long perHeartbeat = conf.getLong(StoreScanner.HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK,
StoreScanner.DEFAULT_HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK);
this.cellsPerTimeoutCheck = perHeartbeat > 0 ?
perHeartbeat :
StoreScanner.DEFAULT_HBASE_CELLS_SCANNED_PER_HEARTBEAT_CHECK;
this.parallelSeekEnabled =
conf.getBoolean(StoreScanner.STORESCANNER_PARALLEL_SEEK_ENABLE, false);
this.conf = conf;
}
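For context, this constructor mirrors how a region server derives its scan settings from a column family descriptor. Below is a minimal sketch of calling it, assuming an HBase 1.x classpath; the descriptor and configuration are placeholders, so treat the wiring as illustrative rather than the exact HStore code:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.KeyValue;

Configuration conf = HBaseConfiguration.create();
HColumnDescriptor family = new HColumnDescriptor("cf"); // hypothetical family
long ttlMs = family.getTimeToLive() * 1000L; // the descriptor stores TTL in seconds
long timeToPurgeDeletes =
    Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
ScanInfo scanInfo = new ScanInfo(conf, family.getName(), family.getMinVersions(),
    family.getMaxVersions(), ttlMs, family.getKeepDeletedCells(), timeToPurgeDeletes,
    KeyValue.COMPARATOR);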
Example 2: testRawScanWithColumns
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
/**
* The ExplicitColumnTracker does not support "raw" scanning.
*/
@Test
public void testRawScanWithColumns() throws Exception {
HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
HConstants.FOREVER, KeepDeletedCells.TRUE);
HRegion region = hbu.createLocalHRegion(htd, null, null);
Scan s = new Scan();
s.setRaw(true);
s.setMaxVersions();
s.addColumn(c0, c0);
try {
region.getScanner(s);
fail("raw scanner with columns should have failed");
} catch (org.apache.hadoop.hbase.DoNotRetryIOException dnre) {
// ok!
}
HRegion.closeHRegion(region);
}
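Because the ExplicitColumnTracker rejects per-column raw scans, a raw scan has to select whole families instead. A minimal counterpart sketch reusing the fixture objects above (region, c0), run before the region is closed:
Scan raw = new Scan();
raw.setRaw(true); // include delete markers and deleted cells
raw.setMaxVersions();
raw.addFamily(c0); // whole-family selection is allowed; addColumn(...) is not
InternalScanner scanner = region.getScanner(raw); // no DoNotRetryIOException here
List<Cell> cells = new ArrayList<>();
scanner.next(cells);
scanner.close();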
Example 3: testDropDeletes
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
private void testDropDeletes(
byte[] from, byte[] to, byte[][] rows, MatchCode... expected) throws IOException {
long now = EnvironmentEdgeManager.currentTime();
// Set time to purge deletes to negative value to avoid it ever happening.
ScanInfo scanInfo = new ScanInfo(fam2, 0, 1, ttl, KeepDeletedCells.FALSE, -1L, rowComparator);
NavigableSet<byte[]> cols = get.getFamilyMap().get(fam2);
ScanQueryMatcher qm = new ScanQueryMatcher(scan, scanInfo, cols, Long.MAX_VALUE,
HConstants.OLDEST_TIMESTAMP, HConstants.OLDEST_TIMESTAMP, now, from, to, null);
List<ScanQueryMatcher.MatchCode> actual =
new ArrayList<ScanQueryMatcher.MatchCode>(rows.length);
byte[] prevRow = null;
for (byte[] row : rows) {
if (prevRow == null || !Bytes.equals(prevRow, row)) {
qm.setRow(row, 0, (short)row.length);
prevRow = row;
}
actual.add(qm.match(new KeyValue(row, fam2, null, now, Type.Delete)));
}
assertEquals(expected.length, actual.size());
for (int i = 0; i < expected.length; i++) {
if (PRINT) System.out.println("expected " + expected[i] + ", actual " + actual.get(i));
assertEquals(expected[i], actual.get(i));
}
}
Example 4: tryDropDelete
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
protected final MatchCode tryDropDelete(Cell cell) {
long timestamp = cell.getTimestamp();
// Still within the delete-marker grace period (timeToPurgeDeletes), so keep the marker.
if (timeToPurgeDeletes > 0 && now - timestamp <= timeToPurgeDeletes) {
return MatchCode.INCLUDE;
}
if (keepDeletedCells == KeepDeletedCells.TRUE
|| (keepDeletedCells == KeepDeletedCells.TTL && timestamp >= oldestUnexpiredTS)) {
// If keepDeletedCells is TRUE, or it is TTL and the marker has not yet expired, the marker
// must take part in version counting before it can be dropped, so normally it is kept. The
// one exception: when the marker is older than the earliest put, it can mask nothing, and
// every later cell of this column (necessarily another delete marker) can be skipped too.
if (timestamp < earliestPutTs) {
return columns.getNextRowOrNextColumn(cell);
} else {
// Returning null defers to the caller's regular delete-tracking path.
return null;
}
} else {
return MatchCode.SKIP;
}
}
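For orientation, null from tryDropDelete is not an error: it tells the caller to keep the marker and continue matching. A simplified sketch of the calling pattern, loosely following the HBase 2.x compaction matchers (the exact surrounding logic varies by matcher subclass):
// Inside a compaction matcher's match(Cell cell), once cell is known to be a delete marker:
trackDelete(cell);                    // remember the marker so it can mask older puts
MatchCode code = tryDropDelete(cell); // INCLUDE, SKIP, a seek hint, or null
if (code != null) {
  return code;
}
// null: keep the marker and continue with the matcher's normal version counting.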
Example 5: ScanInfo
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
private ScanInfo(byte[] family, int minVersions, int maxVersions, long ttl,
KeepDeletedCells keepDeletedCells, long timeToPurgeDeletes, CellComparator comparator,
long tableMaxRowSize, boolean usePread, long cellsPerTimeoutCheck,
boolean parallelSeekEnabled, long preadMaxBytes, boolean newVersionBehavior) {
this.family = family;
this.minVersions = minVersions;
this.maxVersions = maxVersions;
this.ttl = ttl;
this.keepDeletedCells = keepDeletedCells;
this.timeToPurgeDeletes = timeToPurgeDeletes;
this.comparator = comparator;
this.tableMaxRowSize = tableMaxRowSize;
this.usePread = usePread;
this.cellsPerTimeoutCheck = cellsPerTimeoutCheck;
this.parallelSeekEnabled = parallelSeekEnabled;
this.preadMaxBytes = preadMaxBytes;
this.newVersionBehavior = newVersionBehavior;
}
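Application code does not call this private constructor directly; it goes through a public one, which derives the pread and heartbeat knobs from the Configuration. A sketch, assuming an HBase 2.x classpath and the same argument order that Example 8 below relies on:
ScanInfo info = new ScanInfo(conf, Bytes.toBytes("cf"), 0 /* minVersions */,
    1 /* maxVersions */, HConstants.FOREVER /* ttl */, KeepDeletedCells.FALSE,
    HConstants.DEFAULT_BLOCKSIZE /* blockSize */, 0 /* timeToPurgeDeletes */,
    CellComparator.getInstance(), false /* newVersionBehavior */);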
Example 6: testRawScanWithColumns
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
/**
* The ExplicitColumnTracker does not support "raw" scanning.
*/
@Test
public void testRawScanWithColumns() throws Exception {
HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
HConstants.FOREVER, KeepDeletedCells.TRUE);
Region region = hbu.createLocalHRegion(htd, null, null);
Scan s = new Scan();
s.setRaw(true);
s.setMaxVersions();
s.addColumn(c0, c0);
try {
region.getScanner(s);
fail("raw scanner with columns should have failed");
} catch (org.apache.hadoop.hbase.DoNotRetryIOException dnre) {
// ok!
}
HBaseTestingUtility.closeRegionAndWAL(region);
}
Example 7: testDropDeletes
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
private void testDropDeletes(byte[] from, byte[] to, byte[][] rows, MatchCode... expected)
throws IOException {
long now = EnvironmentEdgeManager.currentTime();
// Set time to purge deletes to negative value to avoid it ever happening.
ScanInfo scanInfo = new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE,
HConstants.DEFAULT_BLOCKSIZE, -1L, rowComparator, false);
CompactionScanQueryMatcher qm = CompactionScanQueryMatcher.create(scanInfo,
ScanType.COMPACT_RETAIN_DELETES, Long.MAX_VALUE, HConstants.OLDEST_TIMESTAMP,
HConstants.OLDEST_TIMESTAMP, now, from, to, null);
List<ScanQueryMatcher.MatchCode> actual = new ArrayList<>(rows.length);
byte[] prevRow = null;
for (byte[] row : rows) {
if (prevRow == null || !Bytes.equals(prevRow, row)) {
qm.setToNewRow(KeyValueUtil.createFirstOnRow(row));
prevRow = row;
}
actual.add(qm.match(new KeyValue(row, fam2, null, now, Type.Delete)));
}
assertEquals(expected.length, actual.size());
for (int i = 0; i < expected.length; i++) {
LOG.debug("expected " + expected[i] + ", actual " + actual.get(i));
assertEquals(expected[i], actual.get(i));
}
}
Example 8: testExpiredDeleteFamily
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
/**
* Ensure that expired delete family markers don't override valid puts
*/
@Test
public void testExpiredDeleteFamily() throws Exception {
long now = System.currentTimeMillis();
KeyValue[] kvs = new KeyValue[] {
new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000,
KeyValue.Type.DeleteFamily),
create("R1", "cf", "a", now-10, KeyValue.Type.Put,
"dont-care"),
};
List<KeyValueScanner> scanners = scanFixture(kvs);
Scan scan = new Scan();
scan.readVersions(1);
// scanner with ttl equal to 500
ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE,
HConstants.DEFAULT_BLOCKSIZE, 0, CellComparator.getInstance(), false);
try (StoreScanner scanner = new StoreScanner(scan, scanInfo, null, scanners)) {
List<Cell> results = new ArrayList<>();
assertEquals(true, scanner.next(results));
assertEquals(1, results.size());
assertEquals(kvs[1], results.get(0));
results.clear();
assertEquals(false, scanner.next(results));
}
}
Example 9: _testMatch_ExplicitColumns
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
private void _testMatch_ExplicitColumns(Scan scan, List<MatchCode> expected) throws IOException {
long now = EnvironmentEdgeManager.currentTime();
// 2,4,5
ScanQueryMatcher qm = new ScanQueryMatcher(scan, new ScanInfo(this.conf, fam2,
0, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator), get.getFamilyMap().get(fam2),
now - ttl, now);
List<KeyValue> memstore = new ArrayList<KeyValue>();
memstore.add(new KeyValue(row1, fam2, col1, 1, data));
memstore.add(new KeyValue(row1, fam2, col2, 1, data));
memstore.add(new KeyValue(row1, fam2, col3, 1, data));
memstore.add(new KeyValue(row1, fam2, col4, 1, data));
memstore.add(new KeyValue(row1, fam2, col5, 1, data));
memstore.add(new KeyValue(row2, fam1, col1, data));
List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>();
KeyValue k = memstore.get(0);
qm.setRow(k.getRowArray(), k.getRowOffset(), k.getRowLength());
for (KeyValue kv : memstore) {
actual.add(qm.match(kv));
}
assertEquals(expected.size(), actual.size());
for (int i = 0; i < expected.size(); i++) {
assertEquals(expected.get(i), actual.get(i));
if (PRINT) {
System.out.println("expected " + expected.get(i) + ", actual " + actual.get(i));
}
}
}
Example 10: testDropDeletes
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
private void testDropDeletes(
byte[] from, byte[] to, byte[][] rows, MatchCode... expected) throws IOException {
long now = EnvironmentEdgeManager.currentTime();
// Set time to purge deletes to negative value to avoid it ever happening.
ScanInfo scanInfo =
new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, -1L, rowComparator);
NavigableSet<byte[]> cols = get.getFamilyMap().get(fam2);
ScanQueryMatcher qm = new ScanQueryMatcher(scan, scanInfo, cols, Long.MAX_VALUE,
HConstants.OLDEST_TIMESTAMP, HConstants.OLDEST_TIMESTAMP, now, from, to, null);
List<ScanQueryMatcher.MatchCode> actual =
new ArrayList<ScanQueryMatcher.MatchCode>(rows.length);
byte[] prevRow = null;
for (byte[] row : rows) {
if (prevRow == null || !Bytes.equals(prevRow, row)) {
qm.setRow(row, 0, (short)row.length);
prevRow = row;
}
actual.add(qm.match(new KeyValue(row, fam2, null, now, Type.Delete)));
}
assertEquals(expected.length, actual.size());
for (int i = 0; i < expected.length; i++) {
if (PRINT) System.out.println("expected " + expected[i] + ", actual " + actual.get(i));
assertEquals(expected[i], actual.get(i));
}
}
Example 11: testWildCardTtlScan
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
public void testWildCardTtlScan() throws IOException {
long now = System.currentTimeMillis();
KeyValue [] kvs = new KeyValue[] {
KeyValueTestUtil.create("R1", "cf", "a", now-1000, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R1", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R1", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R1", "cf", "d", now-10000, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R2", "cf", "a", now, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R2", "cf", "b", now-10, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R2", "cf", "c", now-200, KeyValue.Type.Put, "dont-care"),
KeyValueTestUtil.create("R2", "cf", "c", now-1000, KeyValue.Type.Put, "dont-care")
};
List<KeyValueScanner> scanners = scanFixture(kvs);
Scan scan = new Scan();
scan.setMaxVersions(1);
ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE, 0,
KeyValue.COMPARATOR);
ScanType scanType = ScanType.USER_SCAN;
StoreScanner scanner =
new StoreScanner(scan, scanInfo, scanType,
null, scanners);
List<Cell> results = new ArrayList<Cell>();
assertEquals(true, scanner.next(results));
assertEquals(2, results.size());
assertEquals(kvs[1], results.get(0));
assertEquals(kvs[2], results.get(1));
results.clear();
assertEquals(true, scanner.next(results));
assertEquals(3, results.size());
assertEquals(kvs[4], results.get(0));
assertEquals(kvs[5], results.get(1));
assertEquals(kvs[6], results.get(2));
results.clear();
assertEquals(false, scanner.next(results));
}
Example 12: testExpiredDeleteFamily
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
/**
* Ensure that expired delete family markers don't override valid puts
*/
public void testExpiredDeleteFamily() throws Exception {
long now = System.currentTimeMillis();
KeyValue [] kvs = new KeyValue[] {
new KeyValue(Bytes.toBytes("R1"), Bytes.toBytes("cf"), null, now-1000,
KeyValue.Type.DeleteFamily),
KeyValueTestUtil.create("R1", "cf", "a", now-10, KeyValue.Type.Put,
"dont-care"),
};
List<KeyValueScanner> scanners = scanFixture(kvs);
Scan scan = new Scan();
scan.setMaxVersions(1);
// scanner with ttl equal to 500
ScanInfo scanInfo = new ScanInfo(CONF, CF, 0, 1, 500, KeepDeletedCells.FALSE, 0,
KeyValue.COMPARATOR);
ScanType scanType = ScanType.USER_SCAN;
StoreScanner scanner =
new StoreScanner(scan, scanInfo, scanType, null, scanners);
List<Cell> results = new ArrayList<Cell>();
assertEquals(true, scanner.next(results));
assertEquals(1, results.size());
assertEquals(kvs[1], results.get(0));
results.clear();
assertEquals(false, scanner.next(results));
}
Example 13: createColumnFamily
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
private static HColumnDescriptor createColumnFamily() {
return new HColumnDescriptor(CF_NAME)
.setMaxVersions(1)
.setBlockCacheEnabled(true)
.setBloomFilterType(BloomType.ROW)
.setCompressionType(DEFAULT_COMPRESSION_ALGORITHM)
.setDataBlockEncoding(DEFAULT_DATABLOCK_ENCODING)
.setCacheBloomsOnWrite(true)
.setCacheDataOnWrite(true)
.setCacheIndexesOnWrite(true)
.setKeepDeletedCells(KeepDeletedCells.FALSE)
.setValue(HTableDescriptor.MAX_FILESIZE, REGION_MAX_FILESIZE)
.setValue(HTableDescriptor.SPLIT_POLICY, REGION_SPLIT_POLICY);
}
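A hedged usage sketch for this factory: attach the family to a table descriptor and create the table through Admin. The table name "my_table" is hypothetical and conf is an assumed Configuration; CF_NAME and the other constants are defined elsewhere in the project:
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  HTableDescriptor table = new HTableDescriptor(TableName.valueOf("my_table"));
  table.addFamily(createColumnFamily());
  if (!admin.tableExists(table.getTableName())) {
    admin.createTable(table);
  }
}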
Example 14: ScanInfo
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
/**
* @param family Name of this store's column family
* @param minVersions Store's MIN_VERSIONS setting
* @param maxVersions Store's VERSIONS setting
* @param ttl Store's TTL (in ms)
* @param keepDeletedCells Store's keepDeletedCells setting
* @param timeToPurgeDeletes duration in ms after which a delete marker can
* be purged during a major compaction
* @param comparator The store's comparator
*/
public ScanInfo(final byte[] family, final int minVersions, final int maxVersions,
final long ttl, final KeepDeletedCells keepDeletedCells, final long timeToPurgeDeletes,
final KVComparator comparator) {
this.family = family;
this.minVersions = minVersions;
this.maxVersions = maxVersions;
this.ttl = ttl;
this.keepDeletedCells = keepDeletedCells;
this.timeToPurgeDeletes = timeToPurgeDeletes;
this.comparator = comparator;
}
Example 15: testDeleteMarkerExpirationEmptyStore
import org.apache.hadoop.hbase.KeepDeletedCells; // import the required package/class
/**
* Verify that delete markers are removed from an otherwise empty store.
*/
@Test
public void testDeleteMarkerExpirationEmptyStore() throws Exception {
HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 1,
HConstants.FOREVER, KeepDeletedCells.TRUE);
HRegion region = hbu.createLocalHRegion(htd, null, null);
long ts = EnvironmentEdgeManager.currentTime();
Delete d = new Delete(T1, ts);
d.deleteColumns(c0, c0, ts);
region.delete(d);
d = new Delete(T1, ts);
d.deleteFamily(c0);
region.delete(d);
d = new Delete(T1, ts);
d.deleteColumn(c0, c0, ts+1);
region.delete(d);
d = new Delete(T1, ts);
d.deleteColumn(c0, c0, ts+2);
region.delete(d);
// 1 family marker, 1 column marker, 2 version markers
assertEquals(4, countDeleteMarkers(region));
// neither flush nor minor compaction removes any marker
region.flushcache();
assertEquals(4, countDeleteMarkers(region));
region.compactStores(false);
assertEquals(4, countDeleteMarkers(region));
// major compaction removes all, since there are no puts they affect
region.compactStores(true);
assertEquals(0, countDeleteMarkers(region));
HRegion.closeHRegion(region);
}