This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Delete.deleteFamily. If you have been wondering what exactly Delete.deleteFamily does, how to call it, or where to find it used in practice, the hand-picked code samples below should help. You can also browse further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Delete.
The following presents 10 code examples of Delete.deleteFamily, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
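Before diving into the examples, here is a minimal sketch of the method's two overloads, assuming an HBase 1.x client on the classpath. The table name "t1", row key "row1", and family "cf" are placeholders, not values taken from the examples below.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteFamilyExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Table table = conn.getTable(TableName.valueOf("t1"))) {
      Delete delete = new Delete(Bytes.toBytes("row1"));
      // Mark every cell in family "cf" of this row as deleted.
      delete.deleteFamily(Bytes.toBytes("cf"));
      // The two-argument overload would instead limit the delete to cells
      // whose timestamp is less than or equal to the given value:
      //   delete.deleteFamily(Bytes.toBytes("cf"), someTimestamp);
      table.delete(delete);
    }
  }
}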
Example 1: deleteAllTs
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
@Override
public void deleteAllTs(ByteBuffer tableName,
                        ByteBuffer row,
                        ByteBuffer column,
                        long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete = new Delete(getBytes(row));
    addAttributes(delete, attributes);
    byte[][] famAndQf = KeyValue.parseColumn(getBytes(column));
    if (famAndQf.length == 1) {
      delete.deleteFamily(famAndQf[0], timestamp);
    } else {
      delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
    }
    table.delete(delete);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
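The branch above hinges on KeyValue.parseColumn, which splits a Thrift column spec on the ":" delimiter: a bare family name yields a one-element array and becomes a whole-family delete, while "family:qualifier" yields two elements and deletes a single column. A small illustration (the column names here are made up):

byte[][] wholeFamily = KeyValue.parseColumn(Bytes.toBytes("cf"));      // length == 1 -> deleteFamily
byte[][] oneColumn   = KeyValue.parseColumn(Bytes.toBytes("cf:col1")); // length == 2 -> deleteColumns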
Example 2: deleteAndWaitWithFamily
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
private void deleteAndWaitWithFamily(byte[] row, byte[] fam,
    Table source, Table... targets)
    throws Exception {
  Delete del = new Delete(row);
  del.deleteFamily(fam);
  source.delete(del);

  Get get = new Get(row);
  get.addFamily(fam);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 3: testReadWrite
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
@Test (timeout=180000)
public void testReadWrite() throws Exception {
  // action for checkAndDelete
  AccessTestAction checkAndDeleteAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Delete d = new Delete(TEST_ROW);
      d.deleteFamily(TEST_FAMILY);
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.checkAndDelete(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER,
            Bytes.toBytes("test_value"), d);
      }
      return null;
    }
  };
  verifyReadWrite(checkAndDeleteAction);

  // action for checkAndPut()
  AccessTestAction checkAndPut = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Put p = new Put(TEST_ROW);
      p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1));
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.checkAndPut(TEST_ROW, TEST_FAMILY, TEST_QUALIFIER,
            Bytes.toBytes("test_value"), p);
      }
      return null;
    }
  };
  verifyReadWrite(checkAndPut);
}
Example 4: deleteNumericRows
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
public void deleteNumericRows(final HTable t, final byte[] f, int startRow, int endRow)
    throws IOException {
  for (int i = startRow; i < endRow; i++) {
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Delete delete = new Delete(data);
    delete.deleteFamily(f);
    t.delete(delete);
  }
}
Example 5: testCheckAndMutate_WithWrongValue
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
@Test
public void testCheckAndMutate_WithWrongValue() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");
  byte[] qf1 = Bytes.toBytes("qualifier");
  byte[] val1 = Bytes.toBytes("value1");
  byte[] val2 = Bytes.toBytes("value2");

  // Setting up region
  String method = this.getName();
  this.region = initHRegion(tableName, method, CONF, fam1);
  try {
    // Putting data in key
    Put put = new Put(row1);
    put.add(fam1, qf1, val1);
    region.put(put);

    // checkAndPut with wrong value
    boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val2), put, true);
    assertEquals(false, res);

    // checkAndDelete with wrong value: pass the Delete, not the Put,
    // so the failed comparison against val2 must leave the family intact
    Delete delete = new Delete(row1);
    delete.deleteFamily(fam1);
    res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
        new BinaryComparator(val2), delete, true);
    assertEquals(false, res);
  } finally {
    HRegion.closeHRegion(this.region);
    this.region = null;
  }
}
Example 6: deleteFamily
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
private void deleteFamily(String family, String row, long version)
    throws IOException {
  Delete del = new Delete(Bytes.toBytes(row));
  del.deleteFamily(Bytes.toBytes(family + "_ROWCOL"), version);
  del.deleteFamily(Bytes.toBytes(family + "_ROW"), version);
  del.deleteFamily(Bytes.toBytes(family + "_NONE"), version);
  region.delete(del);
}
Example 7: testWrite
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
// test put, delete, increment
@Test (timeout=180000)
public void testWrite() throws Exception {
  // put action
  AccessTestAction putAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Put p = new Put(TEST_ROW);
      p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1));
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.put(p);
      }
      return null;
    }
  };
  verifyWrite(putAction);

  // delete action
  AccessTestAction deleteAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Delete d = new Delete(TEST_ROW);
      d.deleteFamily(TEST_FAMILY);
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.delete(d);
      }
      return null;
    }
  };
  verifyWrite(deleteAction);

  // increment action
  AccessTestAction incrementAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Increment inc = new Increment(TEST_ROW);
      inc.addColumn(TEST_FAMILY, TEST_QUALIFIER, 1);
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Table t = conn.getTable(TEST_TABLE)) {
        t.increment(inc);
      }
      return null;
    }
  };
  verifyWrite(incrementAction);
}
Example 8: testInterruptCompaction
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
/**
 * Verify that you can stop a long-running compaction (used during RS shutdown)
 * @throws Exception
 */
@Test
public void testInterruptCompaction() throws Exception {
  assertEquals(0, count());

  // lower the polling interval for this test
  int origWI = HStore.closeCheckInterval;
  HStore.closeCheckInterval = 10 * 1000; // 10 KB

  try {
    // Create a couple store files w/ 15KB (over 10KB interval)
    int jmax = (int) Math.ceil(15.0 / compactionThreshold);
    byte[] pad = new byte[1000]; // 1 KB chunk
    for (int i = 0; i < compactionThreshold; i++) {
      HRegionIncommon loader = new HRegionIncommon(r);
      Put p = new Put(Bytes.add(STARTROW, Bytes.toBytes(i)));
      p.setDurability(Durability.SKIP_WAL);
      for (int j = 0; j < jmax; j++) {
        p.add(COLUMN_FAMILY, Bytes.toBytes(j), pad);
      }
      HBaseTestCase.addContent(loader, Bytes.toString(COLUMN_FAMILY));
      loader.put(p);
      loader.flushcache();
    }

    HRegion spyR = spy(r);
    doAnswer(new Answer() {
      public Object answer(InvocationOnMock invocation) throws Throwable {
        r.writestate.writesEnabled = false;
        return invocation.callRealMethod();
      }
    }).when(spyR).doRegionCompactionPrep();

    // force a minor compaction, but not before requesting a stop
    spyR.compactStores();

    // ensure that the compaction stopped, all old files are intact,
    Store s = r.stores.get(COLUMN_FAMILY);
    assertEquals(compactionThreshold, s.getStorefilesCount());
    assertTrue(s.getStorefilesSize() > 15 * 1000);
    // and no new store files persisted past compactStores()
    FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
    assertEquals(0, ls.length);
  } finally {
    // don't mess up future tests
    r.writestate.writesEnabled = true;
    HStore.closeCheckInterval = origWI;

    // Delete all Store information once done using
    for (int i = 0; i < compactionThreshold; i++) {
      Delete delete = new Delete(Bytes.add(STARTROW, Bytes.toBytes(i)));
      byte[][] famAndQf = { COLUMN_FAMILY, null };
      delete.deleteFamily(famAndQf[0]);
      r.delete(delete);
    }
    r.flush(true);

    // Multiple versions allowed for an entry, so the delete isn't enough
    // Lower TTL and expire to ensure that all our entries have been wiped
    final int ttl = 1000;
    for (Store hstore : this.r.stores.values()) {
      HStore store = (HStore) hstore;
      ScanInfo old = store.getScanInfo();
      ScanInfo si = new ScanInfo(old.getConfiguration(), old.getFamily(), old.getMinVersions(),
          old.getMaxVersions(), ttl, old.getKeepDeletedCells(), 0, old.getComparator());
      store.setScanInfo(si);
    }
    Thread.sleep(ttl);
    r.compact(true);
    assertEquals(0, count());
  }
}
Example 9: testMinorCompactionWithDeleteColumnFamily
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
@Test
public void testMinorCompactionWithDeleteColumnFamily() throws Exception {
  Delete deleteCF = new Delete(secondRowBytes);
  deleteCF.deleteFamily(fam2);
  testMinorCompactionWithDelete(deleteCF);
}
Example 10: testDeleteFamily_PostInsert
import org.apache.hadoop.hbase.client.Delete; // import the package/class the method depends on
@Test
public void testDeleteFamily_PostInsert() throws IOException, InterruptedException {
  Delete delete = new Delete(row);
  delete.deleteFamily(fam1);
  doTestDelete_AndPostInsert(delete);
}
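A note on API evolution: Delete.deleteFamily (along with deleteColumn and deleteColumns) was deprecated in the HBase 1.0 client in favor of Delete.addFamily and friends, and the deprecated names were removed in HBase 2.0. A sketch of Example 10 rewritten for the 2.x API, assuming the same row and fam1 fields as above:

Delete delete = new Delete(row);
delete.addFamily(fam1); // same semantics as the former deleteFamily(fam1)
doTestDelete_AndPostInsert(delete);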