This article collects typical usage examples of the Java method org.apache.hadoop.hbase.ipc.HRegionInterface.compactRegion. If you are wondering what HRegionInterface.compactRegion does, how to call it, and what it looks like in real code, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.hbase.ipc.HRegionInterface.
The following shows 4 code examples of the HRegionInterface.compactRegion method, sorted by popularity.
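Before the examples, here is a minimal, self-contained sketch of how a client can reach compactRegion directly, assuming an HBase 0.94-era client (the last release line in which org.apache.hadoop.hbase.ipc.HRegionInterface exists). The table name "t1" and column family "cf" are placeholders, not taken from the examples below.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactRegionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.getConnection(conf);
    HTable table = new HTable(conf, "t1"); // placeholder table name
    try {
      // Map every region of the table to the server currently hosting it.
      for (Map.Entry<HRegionInfo, ServerName> entry : table.getRegionLocations().entrySet()) {
        HRegionInfo region = entry.getKey();
        ServerName sn = entry.getValue();
        HRegionInterface rs = connection.getHRegionConnection(sn.getHostname(), sn.getPort());
        // Request a major compaction of the "cf" family (placeholder name).
        // May fail on servers that predate per-family compaction; see Example 1.
        rs.compactRegion(region, true, Bytes.toBytes("cf"));
      }
    } finally {
      table.close();
    }
  }
}

In practice the same operation is usually requested through HBaseAdmin.compact(...), which performs the region lookup for you; this sketch goes through HRegionInterface only because that is the method under discussion.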
Example 1: compact
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the class the method depends on

private void compact(final ServerName sn, final HRegionInfo hri, final boolean major,
    final byte[] family) throws IOException {
  HRegionInterface rs = this.connection.getHRegionConnection(sn.getHostname(), sn.getPort());
  if (family != null) {
    try {
      // Per-column-family compaction; only available on newer servers.
      rs.compactRegion(hri, major, family);
    } catch (IOException ioe) {
      // The RPC layer reports a missing remote method as an IOException whose
      // message embeds the NoSuchMethodException for the requested signature.
      String notFoundMsg = "java.lang.NoSuchMethodException: org.apache.hadoop.hbase.ipc.HRegionInterface."
          + "compactRegion(org.apache.hadoop.hbase.HRegionInfo, boolean, [B)";
      if (ioe.getMessage() != null && ioe.getMessage().contains(notFoundMsg)) {
        throw new IOException("per-column family compaction not supported on this version "
            + "of the HBase server. You may still compact at the table or region level by "
            + "omitting the column family name. Alternatively, you can upgrade the HBase server");
      }
      throw ioe;
    }
  } else {
    // No family given: compact the whole region.
    rs.compactRegion(hri, major);
  }
}
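Note the version-detection pattern here: the old Writable-based RPC surfaces a missing server-side method as an IOException that embeds the remote java.lang.NoSuchMethodException, so the only way for the client to recognize an older server is to match the message against the exact missing signature ([B is the JVM type descriptor for byte[]).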
Example 2: compactAllRegionPerRSThatNeedIt
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the class the method depends on

public void compactAllRegionPerRSThatNeedIt(int minStoreFiles, String columnFamily)
    throws IOException, InterruptedException {
  System.out.println("Table Name: " + tableName);
  for (HRegionInfo region : regions) {
    System.out.println("Region: " + Bytes.toString(region.getRegionName()) + " " + region.getRegionId());
    System.out.println(" StartKey: " + Bytes.toString(region.getStartKey())
        + ", EndKey: " + Bytes.toString(region.getEndKey()));
    System.out.println(" hasSplit: " + region.isSplit());
    System.out.println(" hasSplitParent: " + region.isSplitParent());
    System.out.println(" maxFileSize: " + tableDescriptor.getMaxFileSize());
    System.out.println(" SplitPolicy: " + tableDescriptor.getRegionSplitPolicyClassName());
    List<HRegionLocation> regionLocationList = table.getRegionsInRange(region.getStartKey(), region.getEndKey());
    for (HRegionLocation regionLocation : regionLocationList) {
      HRegionInterface rs = hConnection.getHRegionConnection(regionLocation.getHostname(), regionLocation.getPort());
      List<String> storeFileList = rs.getStoreFileList(region.getRegionName());
      System.out.println(" Compaction State: " + rs.getCompactionState(region.getRegionName()));
      System.out.println(" Store File Count: " + storeFileList.size());
      if (storeFileList.size() > minStoreFiles) {
        // Enough store files have accumulated: trigger a major compaction
        // of the given column family on this region.
        System.out.println(" !!! Compacting !!!");
        rs.compactRegion(region, true, Bytes.toBytes(columnFamily));
      }
    }
  }
}
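compactAllRegionPerRSThatNeedIt depends on instance fields (tableName, regions, tableDescriptor, table, hConnection) that are initialized elsewhere in its class. A plausible wiring, assuming the same 0.94-era client API, might look like the sketch below; the class name CompactionTool and its constructor are assumptions, not part of the original source.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;

// Hypothetical wiring for the instance fields the method above relies on;
// the original class does not show this setup.
public class CompactionTool {
  private final String tableName;
  private final HConnection hConnection;
  private final HTable table;
  private final HTableDescriptor tableDescriptor;
  private final List<HRegionInfo> regions;

  public CompactionTool(Configuration conf, String tableName) throws IOException {
    this.tableName = tableName;
    this.hConnection = HConnectionManager.getConnection(conf);
    this.table = new HTable(conf, tableName);
    this.tableDescriptor = table.getTableDescriptor();
    // Snapshot the table's regions once up front.
    this.regions = new ArrayList<HRegionInfo>(table.getRegionLocations().keySet());
  }
}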
Example 3: compact
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the class the method depends on

private void compact(final ServerName sn, final HRegionInfo hri, final boolean major)
    throws IOException {
  HRegionInterface rs = this.connection.getHRegionConnection(sn.getHostname(), sn.getPort());
  // Region-level compaction with no column family specified.
  rs.compactRegion(hri, major);
}
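This is the fallback path used when no column family is supplied: the two-argument compactRegion(HRegionInfo, boolean) overload predates per-family compaction, so it works against older region servers without the version probing shown in Example 1.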
Example 4: compactSingleRegionPerRSThatNeedIt
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the class the method depends on

public void compactSingleRegionPerRSThatNeedIt(int minStoreFiles, String columnFamily)
    throws IOException, InterruptedException {
  System.out.println("Table Name: " + tableName);
  for (HRegionInfo region : regions) {
    System.out.println("Region: " + Bytes.toString(region.getRegionName()) + " " + region.getRegionId());
    System.out.println(" StartKey: " + Bytes.toString(region.getStartKey())
        + ", EndKey: " + Bytes.toString(region.getEndKey()));
    System.out.println(" hasSplit: " + region.isSplit());
    System.out.println(" hasSplitParent: " + region.isSplitParent());
    System.out.println(" maxFileSize: " + tableDescriptor.getMaxFileSize());
    System.out.println(" SplitPolicy: " + tableDescriptor.getRegionSplitPolicyClassName());
    List<HRegionLocation> regionLocationList = table.getRegionsInRange(region.getStartKey(), region.getEndKey());
    for (HRegionLocation regionLocation : regionLocationList) {
      HRegionInterface rs = hConnection.getHRegionConnection(regionLocation.getHostname(), regionLocation.getPort());
      List<String> storeFileList = rs.getStoreFileList(region.getRegionName());
      System.out.println(" Compaction State: " + rs.getCompactionState(region.getRegionName()));
      System.out.println(" Store File Count: " + storeFileList.size());
      // Only queue a new compaction if none is already running or pending
      // for this region and enough store files have piled up.
      if (rs.getCompactionState(region.getRegionName()).equals("NONE")
          && storeFileList.size() > minStoreFiles) {
        System.out.println(" !!! Compacting !!!");
        //admin.compact(region.getRegionName());
        rs.compactRegion(region, true, Bytes.toBytes(columnFamily));
      }
    }
  }
}
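Despite the name, this variant walks every region just like Example 2; the difference is the extra getCompactionState(...) guard, which skips any region whose state is not "NONE", presumably so that repeated passes never re-request a compaction that is already queued or running. A hypothetical driver, reusing the CompactionTool wiring sketched after Example 2:

Configuration conf = HBaseConfiguration.create();
CompactionTool tool = new CompactionTool(conf, "t1"); // placeholder table name
tool.compactSingleRegionPerRSThatNeedIt(5, "cf");     // compact regions with more than 5 store files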