This page collects typical usage examples of the Java method org.apache.hadoop.hbase.HDFSBlocksDistribution.add. If you have been wondering what HDFSBlocksDistribution.add does, how to use it, or where to find examples of it, the curated examples below should help; you can also explore the enclosing class, org.apache.hadoop.hbase.HDFSBlocksDistribution, for more context.
The 15 code examples of HDFSBlocksDistribution.add below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java examples.
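Before diving into the examples, here is a minimal, self-contained sketch of what HDFSBlocksDistribution.add does (the host names are made up for illustration): each distribution maps hosts to the total weight, in bytes, of the HDFS blocks they hold, and add() merges another distribution's per-host weights, and its unique-block total, into this one. Those totals are what getTopHosts() and getBlockLocalityIndex() are computed from.

import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class HDFSBlocksDistributionAddDemo {
  public static void main(String[] args) {
    // Weights are block sizes in bytes; each listed host holds a replica.
    HDFSBlocksDistribution a = new HDFSBlocksDistribution();
    a.addHostsAndBlockWeight(new String[] { "host1", "host2" }, 100);

    HDFSBlocksDistribution b = new HDFSBlocksDistribution();
    b.addHostsAndBlockWeight(new String[] { "host2", "host3" }, 200);

    // Merge b into a: host2 now carries 300 of the 300 total unique weight.
    a.add(b);

    System.out.println(a.getTopHosts());                  // [host2, host3, host1]
    System.out.println(a.getBlockLocalityIndex("host2")); // 1.0
  }
}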
Example 1: calculateRegionServerLocalities
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
void calculateRegionServerLocalities() {
if (regionFinder == null) {
LOG.warn("Region location finder found null, skipping locality calculations.");
return;
}
for (int i = 0; i < regionsPerServer.length; i++) {
HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
if (regionsPerServer[i].length > 0) {
for (int j = 0; j < regionsPerServer[i].length; j++) {
int regionIndex = regionsPerServer[i][j];
distribution.add(regionFinder.getBlockDistribution(regions[regionIndex]));
}
} else {
LOG.debug("Server " + servers[i].getHostname() + " had 0 regions.");
}
localityPerServer[i] = distribution.getBlockLocalityIndex(servers[i].getHostname());
}
}
Example 2: getStoreDirHosts
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* Return the top hosts of the store files; used by the split code.
*/
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
throws IOException {
FileStatus[] files = FSUtils.listStatus(fs, path);
if (files == null) {
return new String[] {};
}
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
for (FileStatus hfileStatus: files) {
HDFSBlocksDistribution storeFileBlocksDistribution =
FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
List<String> hosts = hdfsBlocksDistribution.getTopHosts();
return hosts.toArray(new String[hosts.size()]);
}
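A note on the FSUtils.computeHDFSBlocksDistribution(fs, status, start, length) call above: the last two arguments select a byte range, so the distribution can be computed for just part of a file. A minimal sketch, assuming an open FileSystem handle fs and a hypothetical file path:

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

// Hypothetical store file path, for illustration only.
Path hfile = new Path("/hbase/data/default/t1/1588230740/cf/f1");
FileStatus status = fs.getFileStatus(hfile);
// Distribution over the first half of the file only.
HDFSBlocksDistribution firstHalf =
    FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen() / 2);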
Example 3: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
*
* @param conf configuration
* @param tableDescriptor HTableDescriptor of the table
* @param regionInfo the region's HRegionInfo
* @param tablePath the table directory
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath)
throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
FileSystem fs = tablePath.getFileSystem(conf);
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
if (storeFiles == null) continue;
for (StoreFileInfo storeFileInfo : storeFiles) {
try {
hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
} catch (IOException ioe) {
LOG.warn("Error getting hdfs block distribution for " + storeFileInfo);
}
}
}
return hdfsBlocksDistribution;
}
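A hedged caller sketch for this helper, assuming it is the public static method on HRegion (as in HBase 1.x) and that tableDescriptor and regionInfo were already fetched from the cluster; the table name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;

Configuration conf = HBaseConfiguration.create();
// Resolve the table directory the same way Example 8 below does.
Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf("t1"));
HDFSBlocksDistribution dist =
    HRegion.computeHDFSBlocksDistribution(conf, tableDescriptor, regionInfo, tablePath);
System.out.println("top hosts: " + dist.getTopHosts());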
Example 4: getStoreDirHosts
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* Return the top hosts of the store files; used by the split code.
*/
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
throws IOException {
FileStatus[] files = FSUtils.listStatus(fs, path, null);
if (files == null) {
return new String[] {};
}
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
for (FileStatus hfileStatus: files) {
HDFSBlocksDistribution storeFileBlocksDistribution =
FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
List<String> hosts = hdfsBlocksDistribution.getTopHosts();
return hosts.toArray(new String[hosts.size()]);
}
Example 5: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
* @param conf configuration
* @param tableDescriptor HTableDescriptor of the table
* @param regionEncodedName encoded name of the region
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf,
HTableDescriptor tableDescriptor, String regionEncodedName) throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf), tableDescriptor.getName());
FileSystem fs = tablePath.getFileSystem(conf);
for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName, family.getName());
if (!fs.exists(storeHomeDir)) continue;
FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);
for (FileStatus hfileStatus : hfilesStatus) {
HDFSBlocksDistribution storeFileBlocksDistribution =
FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
}
return hdfsBlocksDistribution;
}
Example 6: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
*
* @param conf configuration
* @param tableDescriptor HTableDescriptor of the table
* @param regionInfo the region's HRegionInfo
* @param tablePath the table directory
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath)
throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
FileSystem fs = tablePath.getFileSystem(conf);
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
if (storeFiles == null) continue;
for (StoreFileInfo storeFileInfo : storeFiles) {
hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
}
}
return hdfsBlocksDistribution;
}
Example 7: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
* @param conf configuration
* @param tableDescriptor HTableDescriptor of the table
* @param regionInfo the region's HRegionInfo
* @param tablePath the table directory
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath)
throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
FileSystem fs = tablePath.getFileSystem(conf);
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
if (storeFiles == null) continue;
for (StoreFileInfo storeFileInfo : storeFiles) {
hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
}
}
return hdfsBlocksDistribution;
}
Example 8: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
* @param conf configuration
* @param tableDescriptor HTableDescriptor of the table
* @param regionInfo the region's HRegionInfo
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableDescriptor.getTableName());
FileSystem fs = tablePath.getFileSystem(conf);
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
if (storeFiles == null) continue;
for (StoreFileInfo storeFileInfo : storeFiles) {
hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
}
}
return hdfsBlocksDistribution;
}
Example 9: getHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
@Override
public HDFSBlocksDistribution getHDFSBlocksDistribution() {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
synchronized (this.stores) {
for (Store store : this.stores.values()) {
Collection<StoreFile> storeFiles = store.getStorefiles();
if (storeFiles == null) continue;
for (StoreFile sf : storeFiles) {
HDFSBlocksDistribution storeFileBlocksDistribution = sf.getHDFSBlockDistribution();
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
}
}
return hdfsBlocksDistribution;
}
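A short usage sketch: given an open region, a caller can read the aggregated distribution and check how local the region's data is to a given host (the host name here is hypothetical); an index of 1.0 means every block byte has a replica on that host, 0.0 means none do.

// 'region' is an open HRegion obtained elsewhere.
HDFSBlocksDistribution dist = region.getHDFSBlocksDistribution();
float locality = dist.getBlockLocalityIndex("rs1.example.com");
System.out.println("locality on rs1.example.com: " + locality);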
Example 10: getHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* Return the HDFS blocks distribution based on the data captured when the HFiles were
* created.
* @return The HDFS blocks distribution for the region.
*/
public HDFSBlocksDistribution getHDFSBlocksDistribution() {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
synchronized (this.stores) {
for (Store store : this.stores.values()) {
for (StoreFile sf : store.getStorefiles()) {
HDFSBlocksDistribution storeFileBlocksDistribution = sf.getHDFSBlockDistribution();
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
}
}
return hdfsBlocksDistribution;
}
Example 11: getHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* Return the HDFS blocks distribution based on the data
* captured when the HFiles were created.
*
* @return The HDFS blocks distribution for the region.
*/
public HDFSBlocksDistribution getHDFSBlocksDistribution() {
HDFSBlocksDistribution hdfsBlocksDistribution =
new HDFSBlocksDistribution();
synchronized (this.stores) {
for (Store store : this.stores.values()) {
for (StoreFile sf : store.getStorefiles()) {
HDFSBlocksDistribution storeFileBlocksDistribution =
sf.getHDFSBlockDistribution();
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
}
}
return hdfsBlocksDistribution;
}
Example 12: getHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* Return the HDFS blocks distribution based on the data
* captured when the HFiles were created.
* @return The HDFS blocks distribution for the region.
*/
public HDFSBlocksDistribution getHDFSBlocksDistribution() {
HDFSBlocksDistribution hdfsBlocksDistribution =
new HDFSBlocksDistribution();
synchronized (this.stores) {
for (Store store : this.stores.values()) {
for (StoreFile sf : store.getStorefiles()) {
HDFSBlocksDistribution storeFileBlocksDistribution =
sf.getHDFSBlockDistribution();
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
}
}
return hdfsBlocksDistribution;
}
Example 13: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
* @param conf configuration
* @param tableDescriptor HTableDescriptor of the table
* @param regionEncodedName encoded name of the region
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
Configuration conf, HTableDescriptor tableDescriptor,
String regionEncodedName) throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution =
new HDFSBlocksDistribution();
Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf),
tableDescriptor.getName());
FileSystem fs = tablePath.getFileSystem(conf);
for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName,
family.getName());
if (!fs.exists(storeHomeDir)) continue;
FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);
for (FileStatus hfileStatus : hfilesStatus) {
HDFSBlocksDistribution storeFileBlocksDistribution =
FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
hfileStatus.getLen());
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
}
return hdfsBlocksDistribution;
}
Example 14: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
* @param conf configuration
* @param tableDescriptor TableDescriptor of the table
* @param regionInfo the region's RegionInfo
* @param tablePath the table directory
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf,
TableDescriptor tableDescriptor, RegionInfo regionInfo, Path tablePath) throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
FileSystem fs = tablePath.getFileSystem(conf);
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
List<LocatedFileStatus> locatedFileStatusList = HRegionFileSystem
.getStoreFilesLocatedStatus(regionFs, family.getNameAsString(), true);
if (locatedFileStatusList == null) {
continue;
}
for (LocatedFileStatus status : locatedFileStatusList) {
Path p = status.getPath();
if (StoreFileInfo.isReference(p) || HFileLink.isHFileLink(p)) {
// Only construct a StoreFileInfo object when the file is a reference or an
// HFileLink, to save object creation for plain HFiles
StoreFileInfo storeFileInfo = new StoreFileInfo(conf, fs, status);
hdfsBlocksDistribution.add(storeFileInfo
.computeHDFSBlocksDistribution(fs));
} else if (StoreFileInfo.isHFile(p)) {
// If it's an HFile, just add its block locations to the distribution;
// don't create more objects here, not even another HDFSBlocksDistribution
FSUtils.addToHDFSBlocksDistribution(hdfsBlocksDistribution,
status.getBlockLocations());
} else {
throw new IOException("path=" + p
+ " doesn't look like a valid StoreFile");
}
}
}
return hdfsBlocksDistribution;
}
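For the plain-HFile branch above, FSUtils.addToHDFSBlocksDistribution folds the already-fetched BlockLocations straight into the distribution. A sketch of the equivalent logic (this mirrors the effect, not the exact FSUtils source; it belongs inside a method that throws IOException, since BlockLocation.getHosts() can throw):

import org.apache.hadoop.fs.BlockLocation;

// Equivalent of FSUtils.addToHDFSBlocksDistribution(hdfsBlocksDistribution, blockLocations):
for (BlockLocation bl : status.getBlockLocations()) {
  // Each block contributes its length as weight to every host holding a replica.
  hdfsBlocksDistribution.addHostsAndBlockWeight(bl.getHosts(), bl.getLength());
}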
Example 15: computeHDFSBlocksDistribution
import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class the method depends on
/**
* This is a helper function to compute HDFS block distribution on demand
* @param conf configuration
* @param tableDescriptor HTableDescriptor of the table
* @param regionEncodedName encoded name of the region
* @return The HDFS blocks distribution for the given region.
* @throws IOException
*/
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(
Configuration conf, HTableDescriptor tableDescriptor,
String regionEncodedName) throws IOException {
HDFSBlocksDistribution hdfsBlocksDistribution =
new HDFSBlocksDistribution();
Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf),
tableDescriptor.getName());
FileSystem fs = tablePath.getFileSystem(conf);
for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
Path storeHomeDir = HStore.getStoreHomedir(tablePath, regionEncodedName,
family.getName());
if (!fs.exists(storeHomeDir)) continue;
FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);
for (FileStatus hfileStatus : hfilesStatus) {
HDFSBlocksDistribution storeFileBlocksDistribution =
FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
hfileStatus.getLen());
hdfsBlocksDistribution.add(storeFileBlocksDistribution);
}
}
return hdfsBlocksDistribution;
}