This article collects typical usage examples of the Java method org.apache.hadoop.hbase.ipc.HRegionInterface.getStoreFileList. If you have been wondering what HRegionInterface.getStoreFileList does or how to use it, the curated code samples below may help. You can also browse further usage examples of its enclosing class, org.apache.hadoop.hbase.ipc.HRegionInterface.
Four code examples of HRegionInterface.getStoreFileList are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code samples.
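Before the full examples, here is a minimal sketch of the call itself. HRegionInterface is the region-server RPC interface of pre-0.96 HBase; the configuration, table name, and row key below are illustrative assumptions, not part of the examples that follow.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.ipc.HRegionInterface;

public class StoreFileListSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HConnection conn = HConnectionManager.getConnection(conf);
        HTable table = new HTable(conf, "my_table");          // hypothetical table name
        HRegionLocation loc = table.getRegionLocation("row"); // region holding this (hypothetical) row key
        // Open an RPC proxy to the hosting region server and list the region's store files.
        HRegionInterface rs = conn.getHRegionConnection(loc.getHostname(), loc.getPort());
        List<String> storeFiles = rs.getStoreFileList(loc.getRegionInfo().getRegionName());
        for (String path : storeFiles) {
            System.out.println(path); // HDFS path of one HFile
        }
    }
}

Each returned string is the full HDFS path of a store file (HFile), which is what the examples below feed into FileSystem.getFileStatus to read file sizes.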
Example 1: compactAllRegionPerRSThatNeedIt
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the package/class this method depends on
public void compactAllRegionPerRSThatNeedIt(int minStoreFiles, String columnFamily) throws IOException, InterruptedException {
    System.out.println("Table Name: " + tableName);
    for (HRegionInfo region : regions) {
        System.out.println("Region: " + Bytes.toString(region.getRegionName()) + " " + region.getRegionId());
        System.out.println(" StartKey: " + Bytes.toString(region.getStartKey()) + ", EndKey: " + Bytes.toString(region.getEndKey()));
        System.out.println(" hasSplit: " + region.isSplit());
        System.out.println(" hasSplitParent: " + region.isSplitParent());
        System.out.println(" maxFileSize: " + tableDescriptor.getMaxFileSize());
        System.out.println(" SplitPolicy: " + tableDescriptor.getRegionSplitPolicyClassName());
        List<HRegionLocation> regionLocationList = table.getRegionsInRange(region.getStartKey(), region.getEndKey());
        for (HRegionLocation regionLocation : regionLocationList) {
            HRegionInterface rs = hConnection.getHRegionConnection(regionLocation.getHostname(), regionLocation.getPort());
            List<String> storeFileList = rs.getStoreFileList(region.getRegionName());
            System.out.println(" Compaction State: " + rs.getCompactionState(region.getRegionName()));
            System.out.println(" Store File Count: " + storeFileList.size());
            if (storeFileList.size() > minStoreFiles) {
                System.out.println(" !!! Compacting !!!");
                // Trigger a major compaction of the given column family on this region.
                rs.compactRegion(region, true, Bytes.toBytes(columnFamily));
            }
        }
    }
}
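These examples reference instance fields (tableName, regions, table, tableDescriptor, hConnection, hdfs) that the snippets do not declare. Below is a plausible reconstruction of the enclosing class, assuming a pre-0.96 HBase client; the class name and constructor are hypothetical, not taken from the original source.

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionCompactionTool { // hypothetical enclosing class
    private final String tableName;
    private final HTable table;
    private final HTableDescriptor tableDescriptor;
    private final List<HRegionInfo> regions;
    private final HConnection hConnection;
    private final FileSystem hdfs;

    public RegionCompactionTool(String tableName) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        this.tableName = tableName;
        this.table = new HTable(conf, tableName);
        this.tableDescriptor = table.getTableDescriptor();
        // HBaseAdmin can enumerate all regions of the table.
        this.regions = new HBaseAdmin(conf).getTableRegions(Bytes.toBytes(tableName));
        this.hConnection = HConnectionManager.getConnection(conf);
        this.hdfs = FileSystem.get(conf); // used to stat store file sizes
    }
}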
Example 2: compactSingleRegionPerRSThatNeedIt
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the package/class this method depends on
public void compactSingleRegionPerRSThatNeedIt(int minStoreFiles, String columnFamily) throws IOException, InterruptedException {
    System.out.println("Table Name: " + tableName);
    for (HRegionInfo region : regions) {
        System.out.println("Region: " + Bytes.toString(region.getRegionName()) + " " + region.getRegionId());
        System.out.println(" StartKey: " + Bytes.toString(region.getStartKey()) + ", EndKey: " + Bytes.toString(region.getEndKey()));
        System.out.println(" hasSplit: " + region.isSplit());
        System.out.println(" hasSplitParent: " + region.isSplitParent());
        System.out.println(" maxFileSize: " + tableDescriptor.getMaxFileSize());
        System.out.println(" SplitPolicy: " + tableDescriptor.getRegionSplitPolicyClassName());
        List<HRegionLocation> regionLocationList = table.getRegionsInRange(region.getStartKey(), region.getEndKey());
        for (HRegionLocation regionLocation : regionLocationList) {
            HRegionInterface rs = hConnection.getHRegionConnection(regionLocation.getHostname(), regionLocation.getPort());
            List<String> storeFileList = rs.getStoreFileList(region.getRegionName());
            System.out.println(" Compaction State: " + rs.getCompactionState(region.getRegionName()));
            System.out.println(" Store File Count: " + storeFileList.size());
            // Unlike Example 1, only compact when no compaction is already running on this region.
            if (rs.getCompactionState(region.getRegionName()).equals("NONE") && storeFileList.size() > minStoreFiles) {
                System.out.println(" !!! Compacting !!!");
                //admin.compact(region.getRegionName());
                rs.compactRegion(region, true, Bytes.toBytes(columnFamily));
            }
        }
    }
}
Example 3: outputFull
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the package/class this method depends on
public void outputFull() throws IOException {
    System.out.println("Table Name: " + tableName);
    System.out.println("Region Count: " + regions.size());
    for (HRegionInfo region : regions) {
        System.out.println("Region: " + Bytes.toString(region.getRegionName()) + " " + region.getRegionId());
        System.out.println(" StartKey: " + Bytes.toString(region.getStartKey()) + ", EndKey: " + Bytes.toString(region.getEndKey()));
        System.out.println(" hasSplit: " + region.isSplit());
        System.out.println(" hasSplitParent: " + region.isSplitParent());
        System.out.println(" maxFileSize: " + tableDescriptor.getMaxFileSize());
        System.out.println(" SplitPolicy: " + tableDescriptor.getRegionSplitPolicyClassName());
        List<HRegionLocation> regionLocationList = table.getRegionsInRange(region.getStartKey(), region.getEndKey());
        for (HRegionLocation regionLocation : regionLocationList) {
            HRegionInterface rs = hConnection.getHRegionConnection(regionLocation.getHostname(), regionLocation.getPort());
            List<String> storeFileList = rs.getStoreFileList(region.getRegionName());
            System.out.println(" Compaction State: " + rs.getCompactionState(region.getRegionName()));
            System.out.println(" StoreFiles: {");
            int counter = 0;
            for (String storeFile : storeFileList) {
                // Each entry is the HDFS path of one HFile; stat it directly to get its size.
                System.out.println(" " + counter++ + ": " + storeFile);
                System.out.println(" Size: " + hdfs.getFileStatus(new Path(storeFile)).getLen());
            }
            System.out.println(" }");
        }
    }
}
Example 4: outputCompact
import org.apache.hadoop.hbase.ipc.HRegionInterface; // import the package/class this method depends on
public void outputCompact() throws IOException {
    long time = System.currentTimeMillis();
    for (HRegionInfo region : regions) {
        // Emit one CSV line per region: timestamp, region id, start/end keys, split flags,
        // max file size, then per-location compaction state, store file count, and file sizes.
        StringBuilder strBuilder = new StringBuilder(time + ",");
        strBuilder.append(region.getRegionId()).append(",");
        strBuilder.append(Bytes.toString(region.getStartKey())).append(",").append(Bytes.toString(region.getEndKey())).append(",");
        strBuilder.append(region.isSplit()).append(",").append(region.isSplitParent()).append(",");
        strBuilder.append(tableDescriptor.getMaxFileSize()).append(",");
        List<HRegionLocation> regionLocationList = table.getRegionsInRange(region.getStartKey(), region.getEndKey());
        for (HRegionLocation regionLocation : regionLocationList) {
            HRegionInterface rs = hConnection.getHRegionConnection(regionLocation.getHostname(), regionLocation.getPort());
            List<String> storeFileList = rs.getStoreFileList(region.getRegionName());
            strBuilder.append(rs.getCompactionState(region.getRegionName())).append(",");
            strBuilder.append(storeFileList.size()).append(",");
            for (String storeFile : storeFileList) {
                strBuilder.append(hdfs.getFileStatus(new Path(storeFile)).getLen()).append(",");
            }
        }
        System.out.println(strBuilder.toString());
    }
}
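Putting it together, a hypothetical driver for these four methods (using the RegionCompactionTool class sketched after Example 1; the table name and parameters are illustrative) might look like:

public static void main(String[] args) throws Exception {
    RegionCompactionTool tool = new RegionCompactionTool("my_table"); // hypothetical table name
    tool.outputFull();    // human-readable report of regions, store files, and sizes
    tool.outputCompact(); // one CSV line per region, suitable for logging over time
    tool.compactAllRegionPerRSThatNeedIt(5, "cf"); // compact family "cf" where a region has more than 5 store files
}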