This article collects typical usage examples of the Java class org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType. If you are unsure what DeleteType is for or how to use it, the curated examples below should help.
The DeleteType class belongs to the org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest package. The following 15 code examples show how it is used in practice.
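Before the examples, here is a minimal sketch of where DeleteType fits: it selects the granularity of a bulk delete (whole rows, a column family, a single column, or specific versions) inside the BulkDeleteRequest sent to HBase's BulkDeleteEndpoint coprocessor. The helper below is illustrative only; it assumes the ROW/FAMILY/COLUMN/VERSION enum values defined in HBase's BulkDelete.proto and the ProtobufUtil.toScan conversion used throughout the examples that follow.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest;
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;

// Sketch (not from the examples below): build a request asking the endpoint
// to delete the whole rows matched by the scan. Swap DeleteType.ROW for
// FAMILY, COLUMN or VERSION for finer-grained deletes.
static BulkDeleteRequest buildRowDeleteRequest(Scan scan) throws IOException {
  return BulkDeleteRequest.newBuilder()
      .setScan(ProtobufUtil.toScan(scan))  // server-side selection criteria
      .setDeleteType(DeleteType.ROW)       // delete granularity
      .setRowBatchSize(100)                // rows handled per delete batch on the region server
      .build();
}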
Example 1: testBulkDeleteEndpoint
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpoint() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteEndpoint");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 5, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
Example 2: testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion()
    throws Throwable {
  TableName tableName = TableName
      .valueOf("testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 10, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
Example 3: testBulkDeleteFamily
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteFamily() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteFamily");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the column family FAMILY1.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.FAMILY, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY2, QUALIFIER2).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
Example 4: testBulkDeleteEndpoint
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpoint() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteEndpoint");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 5, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
Example 5: testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion()
    throws Throwable {
  byte[] tableName = Bytes
      .toBytes("testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 10, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
  ht.close();
}
Example 6: testBulkDeleteEndpoint
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
@Test
public void testBulkDeleteEndpoint() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteEndpoint");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 5, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
}
Example 7: testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
@Test
public void testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion()
    throws Throwable {
  byte[] tableName = Bytes
      .toBytes("testBulkDeleteEndpointWhenRowBatchSizeLessThanRowsToDeleteFromARegion");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    puts.add(createPut(rowkey, "v1"));
  }
  ht.put(puts);
  // Deleting all the rows.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, new Scan(), 10, DeleteType.ROW, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(0, rows);
}
Example 8: invokeBulkDeleteProtocol
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
private long invokeBulkDeleteProtocol(TableName tableName, final Scan scan, final int rowBatchSize,
    final DeleteType deleteType, final Long timeStamp) throws Throwable {
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  long noOfDeletedRows = 0L;
  Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
      new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
        ServerRpcController controller = new ServerRpcController();
        BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
            new BlockingRpcCallback<BulkDeleteResponse>();

        public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
          Builder builder = BulkDeleteRequest.newBuilder();
          builder.setScan(ProtobufUtil.toScan(scan));
          builder.setDeleteType(deleteType);
          builder.setRowBatchSize(rowBatchSize);
          if (timeStamp != null) {
            builder.setTimestamp(timeStamp);
          }
          service.delete(controller, builder.build(), rpcCallback);
          return rpcCallback.get();
        }
      };
  Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class,
      scan.getStartRow(), scan.getStopRow(), callable);
  for (BulkDeleteResponse response : result.values()) {
    noOfDeletedRows += response.getRowsDeleted();
  }
  ht.close();
  return noOfDeletedRows;
}
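A note on the pattern in this helper: Table.coprocessorService fans the Batch.Call out to every region whose key range overlaps the scan's start and stop rows, so the returned map holds one BulkDeleteResponse per region. The deleted-row totals must therefore be summed on the client, which is exactly what the final loop does.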
Example 9: testBulkDeleteWithConditionBasedDelete
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteWithConditionBasedDelete() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteWithConditionBasedDelete");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  FilterList fl = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILY1, QUALIFIER3,
      CompareOp.EQUAL, Bytes.toBytes("v1"));
  // fl.addFilter(new FirstKeyOnlyFilter());
  fl.addFilter(scvf);
  scan.setFilter(fl);
  // Delete all the rows where FAMILY1:QUALIFIER3 = "v1".
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.ROW, null);
  assertEquals(10, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(90, rows);
  ht.close();
}
Example 10: testBulkDeleteColumn
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteColumn() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteColumn");
  Table ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addColumn(FAMILY1, QUALIFIER2);
  // Delete the column FAMILY1:QUALIFIER2.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.COLUMN, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertEquals(2, result.getFamilyMap(FAMILY1).size());
    assertTrue(result.getColumnCells(FAMILY1, QUALIFIER2).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER1).size());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER3).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
Example 11: invokeBulkDeleteProtocol
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
private long invokeBulkDeleteProtocol(byte[] tableName, final Scan scan, final int rowBatchSize,
    final DeleteType deleteType, final Long timeStamp) throws Throwable {
  HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  long noOfDeletedRows = 0L;
  Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
      new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
        ServerRpcController controller = new ServerRpcController();
        BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
            new BlockingRpcCallback<BulkDeleteResponse>();

        public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
          Builder builder = BulkDeleteRequest.newBuilder();
          builder.setScan(ProtobufUtil.toScan(scan));
          builder.setDeleteType(deleteType);
          builder.setRowBatchSize(rowBatchSize);
          if (timeStamp != null) {
            builder.setTimestamp(timeStamp);
          }
          service.delete(controller, builder.build(), rpcCallback);
          return rpcCallback.get();
        }
      };
  Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class,
      scan.getStartRow(), scan.getStopRow(), callable);
  for (BulkDeleteResponse response : result.values()) {
    noOfDeletedRows += response.getRowsDeleted();
  }
  ht.close();
  return noOfDeletedRows;
}
Example 12: testBulkDeleteWithConditionBasedDelete
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteWithConditionBasedDelete() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteWithConditionBasedDelete");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  FilterList fl = new FilterList(Operator.MUST_PASS_ALL);
  SingleColumnValueFilter scvf = new SingleColumnValueFilter(FAMILY1, QUALIFIER3,
      CompareOp.EQUAL, Bytes.toBytes("v1"));
  // fl.addFilter(new FirstKeyOnlyFilter());
  fl.addFilter(scvf);
  scan.setFilter(fl);
  // Delete all the rows where FAMILY1:QUALIFIER3 = "v1".
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.ROW, null);
  assertEquals(10, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    rows++;
  }
  assertEquals(90, rows);
  ht.close();
}
Example 13: testBulkDeleteColumn
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteColumn() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteColumn");
  HTable ht = createTable(tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    byte[] rowkey = Bytes.toBytes(j);
    String value = (j % 10 == 0) ? "v1" : "v2";
    puts.add(createPut(rowkey, value));
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addColumn(FAMILY1, QUALIFIER2);
  // Delete the column FAMILY1:QUALIFIER2.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.COLUMN, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertEquals(2, result.getFamilyMap(FAMILY1).size());
    assertTrue(result.getColumnCells(FAMILY1, QUALIFIER2).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER1).size());
    assertEquals(1, result.getColumnCells(FAMILY1, QUALIFIER3).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
Example 14: testBulkDeleteFamily
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
public void testBulkDeleteFamily() throws Throwable {
  byte[] tableName = Bytes.toBytes("testBulkDeleteFamily");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the column family FAMILY1.
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.FAMILY, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY2, QUALIFIER2).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
Example 15: invokeBulkDeleteProtocol
import org.apache.hadoop.hbase.coprocessor.example.generated.BulkDeleteProtos.BulkDeleteRequest.DeleteType; // import the required package/class
private long invokeBulkDeleteProtocol(byte[] tableName, final Scan scan, final int rowBatchSize,
    final DeleteType deleteType, final Long timeStamp) throws Throwable {
  HTable ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  long noOfDeletedRows = 0L;
  Batch.Call<BulkDeleteService, BulkDeleteResponse> callable =
      new Batch.Call<BulkDeleteService, BulkDeleteResponse>() {
        ServerRpcController controller = new ServerRpcController();
        BlockingRpcCallback<BulkDeleteResponse> rpcCallback =
            new BlockingRpcCallback<BulkDeleteResponse>();

        public BulkDeleteResponse call(BulkDeleteService service) throws IOException {
          Builder builder = BulkDeleteRequest.newBuilder();
          builder.setScan(ProtobufUtil.toScan(scan));
          builder.setDeleteType(deleteType);
          builder.setRowBatchSize(rowBatchSize);
          if (timeStamp != null) {
            builder.setTimestamp(timeStamp);
          }
          service.delete(controller, builder.build(), rpcCallback);
          return rpcCallback.get();
        }
      };
  Map<byte[], BulkDeleteResponse> result = ht.coprocessorService(BulkDeleteService.class,
      scan.getStartRow(), scan.getStopRow(), callable);
  for (BulkDeleteResponse response : result.values()) {
    noOfDeletedRows += response.getRowsDeleted();
  }
  ht.close();
  return noOfDeletedRows;
}
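Finally, a hypothetical call of the helper above (not part of the original examples), showing the one parameter combination the tests never exercise: DeleteType.VERSION together with the optional timestamp. This restricts the delete to cell versions scoped by that timestamp; the exact version-selection semantics are defined by the server-side BulkDeleteEndpoint, so treat this as a sketch.

// Hypothetical usage sketch: delete only cell versions, scoped by a timestamp.
// "someTable" is an assumed table name for illustration.
byte[] tableName = Bytes.toBytes("someTable");
long deleted = invokeBulkDeleteProtocol(tableName, new Scan(), 500,
    DeleteType.VERSION, System.currentTimeMillis());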