本文整理汇总了Java中org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState类的典型用法代码示例。如果您正苦于以下问题:Java CompactionState类的具体用法?Java CompactionState怎么用?Java CompactionState使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
CompactionState类属于org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse包,在下文中一共展示了CompactionState类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getCompactionStateForRegion
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
* {@inheritDoc}
*/
@Override
/**
 * {@inheritDoc}
 */
@Override
public CompactionState getCompactionStateForRegion(final byte[] regionName)
throws IOException {
  try {
    // Locate the region and the server currently hosting it.
    final Pair<HRegionInfo, ServerName> location = getRegion(regionName);
    if (location == null) {
      throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
    }
    final ServerName hostingServer = location.getSecond();
    if (hostingServer == null) {
      throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
    }
    // Ask the hosting region server for the region info; the second argument
    // (true) requests that the compaction state be included in the response.
    final AdminService.BlockingInterface admin = this.connection.getAdmin(hostingServer);
    final GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
        location.getFirst().getRegionName(), true);
    final PayloadCarryingRpcController controller = rpcControllerFactory.newController();
    // TODO: this does not do retries, it should. Set priority and timeout in controller
    final GetRegionInfoResponse response = admin.getRegionInfo(controller, request);
    return response.getCompactionState();
  } catch (ServiceException se) {
    // Surface the remote cause to callers as an IOException.
    throw ProtobufUtil.getRemoteException(se);
  }
}
示例2: getCompactionStateForRegion
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
* {@inheritDoc}
*/
@Override
/**
 * {@inheritDoc}
 */
@Override
public CompactionState getCompactionStateForRegion(final byte[] regionName)
throws IOException {
  try {
    // Resolve the region and the server that currently serves it.
    final Pair<HRegionInfo, ServerName> location = getRegion(regionName);
    if (location == null) {
      throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName));
    }
    final ServerName hostingServer = location.getSecond();
    if (hostingServer == null) {
      throw new NoServerForRegionException(Bytes.toStringBinary(regionName));
    }
    // Query the hosting region server directly, asking for the compaction
    // state to be included (second argument = true).
    final AdminService.BlockingInterface admin = this.connection.getAdmin(hostingServer);
    final GetRegionInfoRequest request = RequestConverter.buildGetRegionInfoRequest(
        location.getFirst().getRegionName(), true);
    // A null controller makes the stub use its default RPC settings.
    final GetRegionInfoResponse response = admin.getRegionInfo(null, request);
    return response.getCompactionState();
  } catch (ServiceException se) {
    // Surface the remote cause to callers as an IOException.
    throw ProtobufUtil.getRemoteException(se);
  }
}
示例3: doHadoopWork
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
@Override
/**
 * Reads the current compaction state of {@code region} and publishes it into
 * the {@code property} via {@code setPropertyThreadSafe}.
 *
 * @throws BuildException if the region does not exist or the state cannot be read
 */
@Override
protected void doHadoopWork() throws BuildException {
    try {
        if (!checkRegionExists(region)) {
            throw new BuildException("Region " + region + " doesn't exist");
        }
        CompactionState currentState = getHBaseAdmin().getCompactionState(region);
        setPropertyThreadSafe(property, currentState.toString());
    } catch (Exception e) {
        // NOTE: the "doesn't exist" BuildException thrown above is also caught
        // here and re-wrapped; it survives only as the cause of this exception.
        throw new BuildException("Failed to get " + region + " region compaction state", e);
    }
}
示例4: doHadoopWork
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
@Override
/**
 * Reads the current compaction state of {@code table} and publishes it into
 * the {@code property} via {@code setPropertyThreadSafe}.
 *
 * @throws BuildException if the table does not exist or the state cannot be read
 */
@Override
protected void doHadoopWork() throws BuildException {
    try {
        if (!getHBaseAdmin().tableExists(table)) {
            throw new BuildException("Table " + table + " doesn't exist");
        }
        CompactionState currentState = getHBaseAdmin().getCompactionState(table);
        setPropertyThreadSafe(property, currentState.toString());
    } catch (Exception e) {
        // NOTE: the "doesn't exist" BuildException thrown above is also caught
        // here and re-wrapped; it survives only as the cause of this exception.
        throw new BuildException("Failed to get " + table + " table compaction state", e);
    }
}
示例5: getCompactionState
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
* Find out if a given region is in compaction now.
*
* @param regionId
* @return a CompactionState
*/
/**
 * Find out if a given region is in compaction now.
 *
 * @param regionId id of the region to look up
 * @return a CompactionState reflecting any major and/or minor compactions in flight
 */
public static CompactionState getCompactionState(
    final long regionId) {
  final Long key = Long.valueOf(regionId);
  final AtomicInteger majorCount = majorCompactions.get(key);
  final AtomicInteger minorCount = minorCompactions.get(key);
  // A counter may be absent (never compacted) or zero (no compaction running).
  final boolean majorActive = majorCount != null && majorCount.get() > 0;
  final boolean minorActive = minorCount != null && minorCount.get() > 0;
  if (majorActive && minorActive) {
    return CompactionState.MAJOR_AND_MINOR;
  }
  if (majorActive) {
    return CompactionState.MAJOR;
  }
  if (minorActive) {
    return CompactionState.MINOR;
  }
  return CompactionState.NONE;
}
示例6: getCompactionState
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
* @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
* #getCompactionStateForRegion(byte[])} instead.
*/
@Deprecated
/**
 * Gets the compaction state for the region or table named by the given bytes.
 * The bytes are first interpreted as a region name; if that lookup rejects
 * them, they are retried as a table name.
 *
 * @param tableNameOrRegionName region name or table name as bytes
 * @return the current compaction state
 * @throws IOException if the lookup fails
 * @deprecated Use {@link #getCompactionState(org.apache.hadoop.hbase.TableName)} or {@link
 * #getCompactionStateForRegion(byte[])} instead.
 */
@Deprecated
public CompactionState getCompactionState(final byte[] tableNameOrRegionName)
throws IOException, InterruptedException {
try {
return getCompactionStateForRegion(tableNameOrRegionName);
} catch (IllegalArgumentException e) {
// Invalid region, try table
return getCompactionState(TableName.valueOf(tableNameOrRegionName));
}
}
示例7: getCompactionState
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
 * Reports whether a major and/or minor compaction is currently in progress,
 * based on the in-progress counters.
 */
@Override
public CompactionState getCompactionState() {
  final boolean majorActive = majorInProgress.get() > 0;
  final boolean minorActive = minorInProgress.get() > 0;
  if (majorActive) {
    return minorActive ? CompactionState.MAJOR_AND_MINOR : CompactionState.MAJOR;
  }
  return minorActive ? CompactionState.MINOR : CompactionState.NONE;
}
示例8: testMajorCompaction
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/** Drives the shared compaction(...) helper and expects a MAJOR compaction state. */
@Test(timeout=600000)
public void testMajorCompaction() throws IOException, InterruptedException {
compaction("testMajorCompaction", 8, CompactionState.MAJOR, false);
}
示例9: testMinorCompaction
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/** Drives the shared compaction(...) helper and expects a MINOR compaction state. */
@Test(timeout=600000)
public void testMinorCompaction() throws IOException, InterruptedException {
compaction("testMinorCompaction", 15, CompactionState.MINOR, false);
}
示例10: testMajorCompactionOnFamily
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/** Like testMajorCompaction but with the family flag set (last argument = true). */
@Test(timeout=600000)
public void testMajorCompactionOnFamily() throws IOException, InterruptedException {
compaction("testMajorCompactionOnFamily", 8, CompactionState.MAJOR, true);
}
示例11: testMinorCompactionOnFamily
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/** Like testMinorCompaction but with the family flag set (last argument = true). */
@Test(timeout=600000)
public void testMinorCompactionOnFamily() throws IOException, InterruptedException {
compaction("testMinorCompactionOnFamily", 15, CompactionState.MINOR, true);
}
示例12: getCompactionState
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
 * Delegates to the wrapped HBaseAdmin to fetch the table's compaction state.
 *
 * @param tn table to query
 * @return the current compaction state of the table
 * @throws IOException if the underlying admin call fails
 */
@Override
public CompactionState getCompactionState(TableName tn) throws IOException {
return wrappedHbaseAdmin.getCompactionState(tn);
}
示例13: getCompactionStateForRegion
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
 * Delegates to the wrapped HBaseAdmin to fetch a region's compaction state.
 *
 * @param bytes region name as bytes
 * @return the current compaction state of the region
 * @throws IOException if the underlying admin call fails
 */
@Override
public CompactionState getCompactionStateForRegion(byte[] bytes) throws IOException {
return wrappedHbaseAdmin.getCompactionStateForRegion(bytes);
}
示例14: getCompactionState
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
* @return if a given region is in compaction now.
*/
/**
 * @return if a given region is in compaction now.
 */
public CompactionState getCompactionState() {
  final boolean majorRunning = majorInProgress.get() > 0;
  final boolean minorRunning = minorInProgress.get() > 0;
  if (majorRunning && minorRunning) {
    return CompactionState.MAJOR_AND_MINOR;
  } else if (majorRunning) {
    return CompactionState.MAJOR;
  } else if (minorRunning) {
    return CompactionState.MINOR;
  }
  return CompactionState.NONE;
}
示例15: testTags
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; //导入依赖的package包/类
/**
 * Writes three cells — two carrying a "visibility" tag attribute, one without —
 * flushing after each write, then verifies the rows both before and after a
 * major compaction (presumably to confirm tags survive the rewrite of
 * PREFIX_TREE-encoded store files — see the result(...) helper for the checks).
 */
@Test
public void testTags() throws Exception {
  Table table = null;
  try {
    TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    byte[] fam = Bytes.toBytes("info");
    byte[] row = Bytes.toBytes("rowa");
    // column names
    byte[] qual = Bytes.toBytes("qual");
    byte[] row1 = Bytes.toBytes("rowb");
    byte[] row2 = Bytes.toBytes("rowc");
    byte[] value = Bytes.toBytes("value");
    byte[] value1 = Bytes.toBytes("1000dfsdf");
    byte[] value2 = Bytes.toBytes("1000dfsdf");
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setBlockCacheEnabled(true);
    colDesc.setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE);
    desc.addFamily(colDesc);
    Admin admin = TEST_UTIL.getHBaseAdmin();
    admin.createTable(desc);
    table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    putAndFlush(table, admin, tableName, fam, qual, row, value, Bytes.toBytes("myTag"));
    putAndFlush(table, admin, tableName, fam, qual, row1, value1, null);
    putAndFlush(table, admin, tableName, fam, qual, row2, value2, Bytes.toBytes("myTag3"));
    // Verify contents before compaction.
    result(fam, row, qual, row2, table, value, value2, row1, value1);
    admin.compact(tableName);
    // Spin until the requested compaction has fully completed.
    while (admin.getCompactionState(tableName) != CompactionState.NONE) {
      Thread.sleep(10);
    }
    // Verify contents again after compaction.
    result(fam, row, qual, row2, table, value, value2, row1, value1);
  } finally {
    if (table != null) {
      table.close();
    }
  }
}

/**
 * Puts a single cell — optionally carrying a "visibility" attribute — then
 * flushes the table and sleeps briefly.
 *
 * @param visibility attribute value to attach, or {@code null} for none
 */
private static void putAndFlush(Table table, Admin admin, TableName tableName, byte[] fam,
    byte[] qual, byte[] row, byte[] value, byte[] visibility) throws Exception {
  Put put = new Put(row);
  put.add(fam, qual, HConstants.LATEST_TIMESTAMP, value);
  if (visibility != null) {
    put.setAttribute("visibility", visibility);
  }
  table.put(put);
  admin.flush(tableName);
  // We are lacking an API for confirming flush request compaction.
  // Just sleep for a short time. We won't be able to confirm flush
  // completion but the test won't hang now or in the future if
  // default compaction policy causes compaction between flush and
  // when we go to confirm it.
  Thread.sleep(1000);
}