This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Delete. If you are wondering what the Delete class is for, how to use it, or where to find usage examples, the curated class code examples below may help you.
The Delete class belongs to the org.apache.hadoop.hbase.client package. 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
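Before the collected examples, here is a minimal, self-contained sketch of the typical Delete workflow. It is only an illustration under assumptions: the table name "t1", column family "cf", qualifiers "q1"/"q2" and row key "row1" are hypothetical placeholders, not names taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"))) {  // "t1" is a placeholder table
      Delete d = new Delete(Bytes.toBytes("row1"));            // a Delete always targets one row key
      d.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q1"));  // all versions of cf:q1
      d.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q2"));   // only the newest version of cf:q2
      table.delete(d);                                         // send the delete to the region server
    }
  }
}

If no column or family is added, the Delete removes every cell of the row, which is the pattern most of the examples below rely on.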
Example 1: wipeOutMeta
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
protected void wipeOutMeta() throws IOException {
  // Mess it up by blowing up meta.
  Admin admin = TEST_UTIL.getHBaseAdmin();
  Scan s = new Scan();
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(s);
  List<Delete> dels = new ArrayList<Delete>();
  for (Result r : scanner) {
    HRegionInfo info = HRegionInfo.getHRegionInfo(r);
    if (info != null && !info.getTable().getNamespaceAsString()
        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
      Delete d = new Delete(r.getRow());
      dels.add(d);
      admin.unassign(r.getRow(), true);
    }
  }
  meta.delete(dels);
  scanner.close();
  meta.close();
}
Example 2: resetSplitParent
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
/**
 * Reset the split parent region info in meta table
 */
private void resetSplitParent(HbckInfo hi) throws IOException {
  RowMutations mutations = new RowMutations(hi.metaEntry.getRegionName());
  Delete d = new Delete(hi.metaEntry.getRegionName());
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
  d.deleteColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);
  mutations.add(d);
  HRegionInfo hri = new HRegionInfo(hi.metaEntry);
  hri.setOffline(false);
  hri.setSplit(false);
  Put p = MetaTableAccessor.makePutFromRegionInfo(hri);
  mutations.add(p);
  meta.mutateRow(mutations);
  LOG.info("Reset split parent " + hi.metaEntry.getRegionNameAsString() + " in META");
}
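Example 2 builds the Delete with the older deleteColumn(...) method names. In recent 1.x client releases those methods are deprecated in favor of addColumn/addColumns/addFamily (and they are removed in HBase 2.0). A hedged sketch of the same mutation with the newer names, assuming the same hi field as above, would be:

// Same Delete with the non-deprecated names: addColumn(family, qualifier) drops only
// the newest version of a cell (like deleteColumn), addColumns(...) drops every version.
Delete d = new Delete(hi.metaEntry.getRegionName());
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER);
d.addColumn(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER);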
Example 3: commitDeletes
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
private void commitDeletes(final List<ODelete> deletes) throws IOException {
  boolean flushSuccessfully = false;
  try {
    this.adapter.deleteMultiple(tableName.getNameAsString(), deletes);
    flushSuccessfully = true;
  } finally {
    // If the flush failed and the buffer must not be cleared on failure, convert the
    // deletes back to HBase Deletes and keep them in the write buffer for a later retry.
    if (!flushSuccessfully && !clearBufferOnFail) {
      List<Delete> hDeletes = ElementConvertor.toHBaseDeletes(deletes, this.columnMapping);
      synchronized (writeBuffer) {
        for (Delete delete : hDeletes) {
          writeBuffer.add(delete);
          currentWriteBufferSize += delete.heapSize();
        }
      }
    }
  }
}
Example 4: deleteAllTs
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
@Override
public void deleteAllTs(ByteBuffer tableName,
    ByteBuffer row,
    ByteBuffer column,
    long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete = new Delete(getBytes(row));
    addAttributes(delete, attributes);
    byte[][] famAndQf = KeyValue.parseColumn(getBytes(column));
    if (famAndQf.length == 1) {
      delete.deleteFamily(famAndQf[0], timestamp);
    } else {
      delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
    }
    table.delete(delete);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
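The branching in example 4 is the core of the Delete API: a bare family name scopes the delete to a whole column family, while family:qualifier scopes it to a single column, in both cases bounded by the given timestamp. A minimal hedged sketch with the non-deprecated method names; table, the row key and the "cf"/"q" names are placeholders, not part of the Thrift handler above:

// Assumed: table is an open org.apache.hadoop.hbase.client.Table, "cf"/"q" exist.
long ts = System.currentTimeMillis();
Delete byFamily = new Delete(Bytes.toBytes("row1"));
byFamily.addFamily(Bytes.toBytes("cf"), ts);                       // every column of cf, versions <= ts
table.delete(byFamily);

Delete byColumn = new Delete(Bytes.toBytes("row1"));
byColumn.addColumns(Bytes.toBytes("cf"), Bytes.toBytes("q"), ts);  // all versions of cf:q with timestamp <= ts
table.delete(byColumn);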
Example 5: deleteAllRowTs
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
@Override
public void deleteAllRowTs(
    ByteBuffer tableName, ByteBuffer row, long timestamp,
    Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete = new Delete(getBytes(row), timestamp);
    addAttributes(delete, attributes);
    table.delete(delete);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
Example 6: removeUserPermission
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
/**
 * Removes a previously granted permission from the stored access control
 * lists. The {@link TablePermission} being removed must exactly match what
 * is stored -- no wildcard matching is attempted. Ie, if user "bob" has
 * been granted "READ" access to the "data" table, but only to column family
 * plus qualifier "info:colA", then trying to call this method with only
 * user "bob" and the table name "data" (but without specifying the
 * column qualifier "info:colA") will have no effect.
 *
 * @param conf the configuration
 * @param userPerm the details of the permission to be revoked
 * @throws IOException if there is an error accessing the metadata table
 */
static void removeUserPermission(Configuration conf, UserPermission userPerm)
    throws IOException {
  Delete d = new Delete(userPermissionRowKey(userPerm));
  byte[] key = userPermissionKey(userPerm);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Removing permission " + userPerm.toString());
  }
  d.addColumns(ACL_LIST_FAMILY, key);
  // TODO: Pass in a Connection rather than create one each time.
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    try (Table table = connection.getTable(ACL_TABLE_NAME)) {
      table.delete(d);
    }
  }
}
Example 7: removeRegionReplicasFromMeta
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
/**
 * Deletes some replica columns corresponding to replicas for the passed rows
 * @param metaRows rows in hbase:meta
 * @param replicaIndexToDeleteFrom the replica ID we would start deleting from
 * @param numReplicasToRemove how many replicas to remove
 * @param connection connection we're using to access meta table
 * @throws IOException
 */
public static void removeRegionReplicasFromMeta(Set<byte[]> metaRows,
    int replicaIndexToDeleteFrom, int numReplicasToRemove, Connection connection)
    throws IOException {
  int absoluteIndex = replicaIndexToDeleteFrom + numReplicasToRemove;
  for (byte[] row : metaRows) {
    Delete deleteReplicaLocations = new Delete(row);
    for (int i = replicaIndexToDeleteFrom; i < absoluteIndex; i++) {
      deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
          getServerColumn(i));
      deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
          getSeqNumColumn(i));
      deleteReplicaLocations.deleteColumns(HConstants.CATALOG_FAMILY,
          getStartCodeColumn(i));
    }
    deleteFromMetaTable(connection, deleteReplicaLocations);
  }
}
Example 8: updateMeta
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
@Override
protected void updateMeta(final byte[] oldRegion1,
    final byte[] oldRegion2,
    HRegion newRegion)
    throws IOException {
  byte[][] regionsToDelete = {oldRegion1, oldRegion2};
  for (int r = 0; r < regionsToDelete.length; r++) {
    if (Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
      latestRegion = null;
    }
    Delete delete = new Delete(regionsToDelete[r]);
    table.delete(delete);
    if (LOG.isDebugEnabled()) {
      LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
    }
  }
  newRegion.getRegionInfo().setOffline(true);
  MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo());
  if (LOG.isDebugEnabled()) {
    LOG.debug("updated columns in row: "
        + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName()));
  }
}
Example 9: postProcess
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
@Override
public void postProcess(HRegion region, WALEdit walEdit, boolean success) throws IOException {
  RegionCoprocessorHost coprocessorHost = region.getCoprocessorHost();
  if (coprocessorHost != null) {
    for (Mutation m : mutations) {
      if (m instanceof Put) {
        coprocessorHost.postPut((Put) m, walEdit, m.getDurability());
      } else if (m instanceof Delete) {
        coprocessorHost.postDelete((Delete) m, walEdit, m.getDurability());
      }
    }
    // At the end call the CP hook postBatchMutateIndispensably
    if (miniBatch != null) {
      // This hook is called directly, without going through pre/postBatchMutate(); when the
      // Processor did a read-only pass (miniBatch is null) there is no need to call this
      // batch-based CP hook either.
      coprocessorHost.postBatchMutateIndispensably(miniBatch, success);
    }
  }
}
Example 10: deleteAndWait
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
private void deleteAndWait(byte[] row, Table source, Table... targets)
    throws Exception {
  Delete del = new Delete(row);
  source.delete(del);
  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 11: buildMutateRequest
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
/**
 * Create a protocol buffer MutateRequest for a conditioned delete
 *
 * @param regionName
 * @param row
 * @param family
 * @param qualifier
 * @param comparator
 * @param compareType
 * @param delete
 * @return a mutate request
 * @throws IOException
 */
public static MutateRequest buildMutateRequest(
    final byte[] regionName, final byte[] row, final byte[] family,
    final byte[] qualifier, final ByteArrayComparable comparator,
    final CompareType compareType, final Delete delete) throws IOException {
  MutateRequest.Builder builder = MutateRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
      RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);
  Condition condition = buildCondition(
      row, family, qualifier, comparator, compareType);
  builder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete,
      MutationProto.newBuilder()));
  builder.setCondition(condition);
  return builder.build();
}
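Application code rarely builds this protocol buffer by hand; a conditional delete is normally issued through the Table#checkAndDelete client call (the path that examples 14 and 15 below exercise), which internally goes through a conditional MutateRequest like the one built here. A minimal hedged sketch, where table is an assumed open Table and the row, family, qualifier and expected value are placeholders:

byte[] row = Bytes.toBytes("row1");
byte[] cf = Bytes.toBytes("cf");
byte[] q = Bytes.toBytes("q");
byte[] expected = Bytes.toBytes("v1");
Delete d = new Delete(row);
d.addColumns(cf, q);
// Delete cf:q of the row only if its current value equals "v1";
// the return value reports whether the condition matched and the delete was applied.
boolean applied = table.checkAndDelete(row, cf, q, expected, d);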
Example 12: verifyUserDeniedForDeleteMultipleVersions
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
private void verifyUserDeniedForDeleteMultipleVersions(final User user, final byte[] row,
    final byte[] q1, final byte[] q2) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Delete d = new Delete(row);
          d.addColumns(TEST_FAMILY1, q1);
          d.addColumns(TEST_FAMILY1, q2);
          t.delete(d);
          fail(user.getShortName() + " should not be allowed to delete the row");
        } catch (Exception e) {
          // Expected: the delete should be rejected for this user.
        }
      }
      return null;
    }
  });
}
Example 13: verifyUserDeniedForDeleteExactVersion
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
private void verifyUserDeniedForDeleteExactVersion(final User user, final byte[] row,
    final byte[] q1, final byte[] q2) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Delete d = new Delete(row, 127);
          d.addColumns(TEST_FAMILY1, q1);
          d.addColumns(TEST_FAMILY1, q2);
          d.addFamily(TEST_FAMILY2, 129);
          t.delete(d);
          fail(user.getShortName() + " can not do the delete");
        } catch (Exception e) {
          // Expected: the delete should be rejected for this user.
        }
      }
      return null;
    }
  });
}
Example 14: verifyUserAllowedforCheckAndDelete
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
private void verifyUserAllowedforCheckAndDelete(final User user, final byte[] row,
    final byte[] q1, final byte[] value) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Delete d = new Delete(row);
          d.addColumn(TEST_FAMILY1, q1, 120);
          t.checkAndDelete(row, TEST_FAMILY1, q1, value, d);
        }
      }
      return null;
    }
  });
}
Example 15: verifyUserDeniedForCheckAndDelete
import org.apache.hadoop.hbase.client.Delete; // import the required package/class
private void verifyUserDeniedForCheckAndDelete(final User user, final byte[] row,
    final byte[] value) throws IOException, InterruptedException {
  user.runAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(conf)) {
        try (Table t = connection.getTable(TEST_TABLE.getTableName())) {
          Delete d = new Delete(row);
          d.addColumns(TEST_FAMILY1, TEST_Q1);
          t.checkAndDelete(row, TEST_FAMILY1, TEST_Q1, value, d);
          fail(user.getShortName() + " should not be allowed to do checkAndDelete");
        } catch (Exception e) {
          // Expected: the checkAndDelete should be rejected for this user.
        }
      }
      return null;
    }
  });
}