本文整理汇总了Java中org.apache.hadoop.hbase.client.Delete.deleteColumn方法的典型用法代码示例。如果您正苦于以下问题:Java Delete.deleteColumn方法的具体用法?Java Delete.deleteColumn怎么用?Java Delete.deleteColumn使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.client.Delete的用法示例。
在下文中一共展示了Delete.deleteColumn方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: resetSplitParent
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
/**
 * Reset the split parent region info in meta table
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  byte[] regionName = hi.metaEntry.getRegionName();
  RowMutations mutations = new RowMutations(regionName);

  // Drop the daughter pointers so the parent is no longer considered split.
  Delete delete = new Delete(regionName);
  delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  delete.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(delete);

  // Re-publish the region info with the offline/split flags cleared,
  // applied atomically with the delete above.
  HRegionInfo regionInfo = new HRegionInfo(hi.metaEntry);
  regionInfo.setOffline(false);
  regionInfo.setSplit(false);
  mutations.add(MetaTableAccessor.makePutFromRegionInfo(regionInfo));

  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META" );
}
示例2: deleteColumns
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
/**
 * Scans rows matching the given prefix/value and deletes the
 * trans-tags:qual2 cell of each row found, asserting exactly 3 deletes.
 */
private void deleteColumns(HRegion r, String value, String keyPrefix) throws IOException {
  InternalScanner scanner = buildScanner(keyPrefix, value, r);
  List<Cell> results = new ArrayList<Cell>();
  int deletesIssued = 0;
  boolean hasMore = true;
  while (hasMore) {
    hasMore = scanner.next(results);
    if (results == null || results.isEmpty()) {
      // Scanner returned nothing for this batch; stop deleting.
      break;
    }
    deletesIssued++;
    // Delete the target cell from the row we just scanned.
    Delete delete = new Delete(CellUtil.cloneRow(results.get(0)));
    delete.deleteColumn(Bytes.toBytes("trans-tags"), Bytes.toBytes("qual2"));
    r.delete(delete);
    results.clear();
  }
  assertEquals("Did not perform correct number of deletes", 3, deletesIssued);
}
示例3: testRowMutation
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
@Test (timeout=300000)
public void testRowMutation() throws IOException {
  TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRowMutation");
  Table table = util.createTable(tableName, new byte[][] {A, B, C});
  try {
    // Fresh table: none of the observer hooks should have fired yet.
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDeleted"},
        tableName,
        new Boolean[] {false, false, false, false, false});

    // Build a put and a delete over the same row.
    Put p = new Put(ROW);
    p.add(A, A, A);
    p.add(B, B, B);
    p.add(C, C, C);

    Delete d = new Delete(ROW);
    d.deleteColumn(A, A);
    d.deleteColumn(B, B);
    d.deleteColumn(C, C);

    // Submit both atomically as a single row mutation.
    RowMutations rowMutations = new RowMutations(ROW);
    rowMutations.add(p);
    rowMutations.add(d);
    table.mutateRow(rowMutations);

    // The row mutation should have fired the put and delete hooks, but no get hooks.
    verifyMethodResult(SimpleRegionObserver.class,
        new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
            "hadDeleted"},
        tableName,
        new Boolean[] {false, false, true, true, true}
        );
  } finally {
    util.deleteTable(tableName);
    table.close();
  }
}
示例4: testMinorCompactionWithDeleteColumn2
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
@Test
public void testMinorCompactionWithDeleteColumn2() throws Exception {
  // Delete only the newest version of fam2:col2 on the second row.
  Delete deleteLatest = new Delete(secondRowBytes);
  deleteLatest.deleteColumn(fam2, col2);
  /* compactionThreshold is 3. The table has 4 versions: 0, 1, 2, and 3.
   * we only delete the latest version. One might expect to see only
   * versions 1 and 2. HBase differs, and gives us 0, 1 and 2.
   * This is okay as well. Since there was no compaction done before the
   * delete, version 0 seems to stay on.
   */
  testMinorCompactionWithDelete(deleteLatest, 3);
}
示例5: testMinorCompactionWithDeleteVersion2
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
@Test
public void testMinorCompactionWithDeleteVersion2() throws Exception {
  // Target exactly the cell of fam2:col2 at timestamp 1.
  Delete deleteAtTs = new Delete(secondRowBytes);
  deleteAtTs.deleteColumn(fam2, col2, 1);
  /*
   * the table has 4 versions: 0, 1, 2, and 3.
   * We delete 1.
   * Should have 3 remaining.
   */
  testMinorCompactionWithDelete(deleteAtTs, 3);
}
示例6: testCheckAndMutate_WithCorrectValue
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
@Test
public void testCheckAndMutate_WithCorrectValue() throws IOException {
  byte[] rowKey = Bytes.toBytes("row1");
  byte[] family = Bytes.toBytes("fam1");
  byte[] qualifier = Bytes.toBytes("qualifier");
  byte[] expected = Bytes.toBytes("value1");

  // Setting up region
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, family);
  try {
    // Store a cell so the checks below have something to compare against.
    Put put = new Put(rowKey);
    put.add(family, qualifier, expected);
    region.put(put);

    // checkAndPut succeeds when the stored value equals the comparator value.
    boolean matched = region.checkAndMutate(rowKey, family, qualifier, CompareOp.EQUAL,
        new BinaryComparator(expected), put, true);
    assertEquals(true, matched);

    // checkAndDelete with the same correct value should also succeed.
    Delete delete = new Delete(rowKey);
    delete.deleteColumn(family, qualifier);
    matched = region.checkAndMutate(rowKey, family, qualifier, CompareOp.EQUAL,
        new BinaryComparator(expected), delete, true);
    assertEquals(true, matched);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
示例7: testDelete_multiDeleteColumn
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
@Test
public void testDelete_multiDeleteColumn() throws IOException {
  byte[] row = Bytes.toBytes("row1");
  byte[] family = Bytes.toBytes("fam1");
  byte[] qualifier = Bytes.toBytes("qualifier");
  byte[] value = Bytes.toBytes("value");

  // Two versions of the same cell, at timestamps 1 and 2.
  Put put = new Put(row);
  put.add(family, qualifier, 1, value);
  put.add(family, qualifier, 2, value);

  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, family);
  try {
    region.put(put);

    // We do support deleting more than 1 'latest' version: calling
    // deleteColumn twice removes both stored versions.
    Delete delete = new Delete(row);
    delete.deleteColumn(family, qualifier);
    delete.deleteColumn(family, qualifier);
    region.delete(delete);

    // Nothing should remain in the family after the double delete.
    Get get = new Get(row);
    get.addFamily(family);
    Result result = region.get(get);
    assertEquals(0, result.size());
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
示例8: testRegionObserver
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
@Test (timeout=300000)
public void testRegionObserver() throws IOException {
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".testRegionObserver");
// recreate table every time in order to reset the status of the
// coprocessor.
Table table = util.createTable(tableName, new byte[][] {A, B, C});
try {
// Fresh table: no get/put/delete/batch observer hooks should have fired yet.
verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
"hadPrePut", "hadPostPut", "hadDelete", "hadPostStartRegionOperation",
"hadPostCloseRegionOperation", "hadPostBatchMutateIndispensably" }, tableName,
new Boolean[] { false, false, false, false, false, false, false, false });
// A single put should fire the put and batch-mutate hooks, but not get/delete.
Put put = new Put(ROW);
put.add(A, A, A);
put.add(B, B, B);
put.add(C, C, C);
table.put(put);
// NOTE(review): this verification passes TEST_TABLE while every other call in
// this method uses tableName — confirm whether checking the shared table here
// is intentional or a copy/paste slip.
verifyMethodResult(SimpleRegionObserver.class, new String[] { "hadPreGet", "hadPostGet",
"hadPrePut", "hadPostPut", "hadPreBatchMutate", "hadPostBatchMutate", "hadDelete",
"hadPostStartRegionOperation", "hadPostCloseRegionOperation",
"hadPostBatchMutateIndispensably" }, TEST_TABLE, new Boolean[] { false, false, true,
true, true, true, false, true, true, true });
// The region for this table was opened once and is still open.
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
tableName,
new Integer[] {1, 1, 0, 0});
// A get should fire the get hooks; delete hooks remain unfired.
Get get = new Get(ROW);
get.addColumn(A, A);
get.addColumn(B, B);
get.addColumn(C, C);
table.get(get);
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
"hadDelete", "hadPrePreparedDeleteTS"},
tableName,
new Boolean[] {true, true, true, true, false, false}
);
// Finally, a delete should flip the delete-related hooks to true as well.
Delete delete = new Delete(ROW);
delete.deleteColumn(A, A);
delete.deleteColumn(B, B);
delete.deleteColumn(C, C);
table.delete(delete);
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreGet", "hadPostGet", "hadPrePut", "hadPostPut",
"hadPreBatchMutate", "hadPostBatchMutate", "hadDelete", "hadPrePreparedDeleteTS"},
tableName,
new Boolean[] {true, true, true, true, true, true, true, true}
);
} finally {
util.deleteTable(tableName);
table.close();
}
// After deleteTable the region must have been closed exactly once.
verifyMethodResult(SimpleRegionObserver.class,
new String[] {"getCtPreOpen", "getCtPostOpen", "getCtPreClose", "getCtPostClose"},
tableName,
new Integer[] {1, 1, 1, 1});
}
示例9: testWithoutKeepingDeletes
import org.apache.hadoop.hbase.client.Delete; //导入方法依赖的package包/类
/**
 * basic verification of existing behavior
 */
@Test
public void testWithoutKeepingDeletes() throws Exception {
  // KEEP_DELETED_CELLS is NOT enabled
  HTableDescriptor htd = hbu.createTableDescriptor(name.getMethodName(), 0, 3,
      HConstants.FOREVER, KeepDeletedCells.FALSE);
  HRegion region = hbu.createLocalHRegion(htd, null, null);

  long now = EnvironmentEdgeManager.currentTime();

  // Write one cell at time 'now' and confirm a time-ranged get sees it.
  Put put = new Put(T1, now);
  put.add(c0, c0, T1);
  region.put(put);

  Get preDeleteGet = new Get(T1);
  preDeleteGet.setMaxVersions();
  preDeleteGet.setTimeRange(0L, now + 1);
  Result preDeleteResult = region.get(preDeleteGet);
  assertFalse(preDeleteResult.isEmpty());

  // Place a column delete marker at now+2 covering the cell written at 'now'.
  Delete delete = new Delete(T1, now + 2);
  delete.deleteColumn(c0, c0, now);
  region.delete(delete);

  // "past" get does not see rows behind delete marker
  Get get = new Get(T1);
  get.setMaxVersions();
  get.setTimeRange(0L, now + 1);
  Result result = region.get(get);
  assertTrue(result.isEmpty());

  // "past" scan does not see rows behind delete marker
  Scan scan = new Scan();
  scan.setMaxVersions();
  scan.setTimeRange(0L, now + 1);
  InternalScanner scanner = region.getScanner(scan);
  List<Cell> cells = new ArrayList<Cell>();
  while (scanner.next(cells)) {
    // keep draining; nothing is expected back
  }
  assertTrue(cells.isEmpty());

  // flushing and minor compaction keep delete markers
  region.flush(true);
  region.compact(false);
  assertEquals(1, countDeleteMarkers(region));

  region.compact(true);
  // major compaction deleted it
  assertEquals(0, countDeleteMarkers(region));

  HRegion.closeHRegion(region);
}