当前位置: 首页>>代码示例>>Java>>正文


Java HDFSBlocksDistribution.add方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.HDFSBlocksDistribution.add方法的典型用法代码示例。如果您正苦于以下问题:Java HDFSBlocksDistribution.add方法的具体用法?Java HDFSBlocksDistribution.add怎么用?Java HDFSBlocksDistribution.add使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.HDFSBlocksDistribution的用法示例。


在下文中一共展示了HDFSBlocksDistribution.add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: calculateRegionServerLocalities

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * Fills {@code localityPerServer} with each server's HDFS block-locality index,
 * computed from the merged block distribution of every region the server hosts.
 * Does nothing (beyond a warning) when no {@code regionFinder} is available.
 */
void calculateRegionServerLocalities() {
  if (regionFinder == null) {
    LOG.warn("Region location finder found null, skipping locality calculations.");
    return;
  }
  for (int serverIdx = 0; serverIdx < regionsPerServer.length; serverIdx++) {
    HDFSBlocksDistribution combined = new HDFSBlocksDistribution();
    int[] hostedRegions = regionsPerServer[serverIdx];
    if (hostedRegions.length == 0) {
      LOG.debug("Server " + servers[serverIdx].getHostname() + " had 0 regions.");
    } else {
      // Merge the block distribution of every region assigned to this server.
      for (int regionIdx : hostedRegions) {
        combined.add(regionFinder.getBlockDistribution(regions[regionIdx]));
      }
    }
    localityPerServer[serverIdx] =
        combined.getBlockLocalityIndex(servers[serverIdx].getHostname());
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:19,代码来源:BaseLoadBalancer.java

示例2: getStoreDirHosts

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * return the top hosts of the store files, used by the Split
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  final FileStatus[] storeFiles = FSUtils.listStatus(fs, path);
  if (storeFiles == null) {
    return new String[] {};
  }

  // Fold the block distribution of every file in the directory together.
  final HDFSBlocksDistribution combined = new HDFSBlocksDistribution();
  for (FileStatus status : storeFiles) {
    combined.add(FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen()));
  }

  final List<String> topHosts = combined.getTopHosts();
  return topHosts.toArray(new String[topHosts.size()]);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:21,代码来源:CompactionTool.java

示例3: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 *
 * @param conf            configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionInfo      encoded name of the region
 * @param tablePath       the table directory
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException if the table filesystem or a store file listing cannot be accessed
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
    final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath)
    throws IOException {
  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  FileSystem fs = tablePath.getFileSystem(conf);

  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
  for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
    Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
    if (storeFiles == null) continue;
    for (StoreFileInfo storeFileInfo : storeFiles) {
      try {
        hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
      } catch (IOException ioe) {
        // Best-effort: skip this file, but keep the cause in the log instead of
        // discarding the stack trace (previously only the message was logged).
        LOG.warn("Error getting hdfs block distribution for " + storeFileInfo, ioe);
      }
    }
  }
  return hdfsBlocksDistribution;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:31,代码来源:HRegion.java

示例4: getStoreDirHosts

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * return the top hosts of the store files, used by the Split
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  final FileStatus[] statuses = FSUtils.listStatus(fs, path, null);
  if (statuses == null) {
    return new String[] {};
  }

  // Accumulate the block distribution across every file under the store dir.
  final HDFSBlocksDistribution merged = new HDFSBlocksDistribution();
  for (FileStatus fileStatus : statuses) {
    merged.add(FSUtils.computeHDFSBlocksDistribution(fs, fileStatus, 0, fileStatus.getLen()));
  }

  final List<String> hostList = merged.getTopHosts();
  return hostList.toArray(new String[hostList.size()]);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:21,代码来源:CompactionTool.java

示例5: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 * @param conf configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionEncodedName encoded name of the region
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException if the filesystem cannot be reached or a store directory listing fails
 */
static public HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf,
    HTableDescriptor tableDescriptor, String regionEncodedName) throws IOException {
  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf), tableDescriptor.getName());
  FileSystem fs = tablePath.getFileSystem(conf);

  for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
    Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName, family.getName());
    // A family may not have a store directory yet; skip it rather than fail.
    if (!fs.exists(storeHomeDir)) continue;

    FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);
    for (FileStatus hfileStatus : hfilesStatus) {
      HDFSBlocksDistribution storeFileBlocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
      hdfsBlocksDistribution.add(storeFileBlocksDistribution);
    }
  }
  return hdfsBlocksDistribution;
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:30,代码来源:HRegion.java

示例6: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 *
 * @param conf            configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionInfo      encoded name of the region
 * @param tablePath       the table directory
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
                                                                   final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo, Path tablePath)
        throws IOException {
    HDFSBlocksDistribution result = new HDFSBlocksDistribution();
    FileSystem fs = tablePath.getFileSystem(conf);
    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);

    for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
        // A family with no store files yet yields null; just move on.
        Collection<StoreFileInfo> familyStoreFiles = regionFs.getStoreFiles(family.getNameAsString());
        if (familyStoreFiles == null) {
            continue;
        }
        for (StoreFileInfo info : familyStoreFiles) {
            result.add(info.computeHDFSBlocksDistribution(fs));
        }
    }
    return result;
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:28,代码来源:HRegion.java

示例7: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 * @param conf configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionInfo encoded name of the region
 * @param tablePath the table directory
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
    final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo,  Path tablePath)
    throws IOException {
  HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
  FileSystem fs = tablePath.getFileSystem(conf);
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);

  for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
    Collection<StoreFileInfo> files = regionFs.getStoreFiles(family.getNameAsString());
    if (files == null) {
      continue;
    }
    // Fold every store file of this family into the region-wide distribution.
    for (StoreFileInfo fileInfo : files) {
      distribution.add(fileInfo.computeHDFSBlocksDistribution(fs));
    }
  }
  return distribution;
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:27,代码来源:HRegion.java

示例8: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 * @param conf configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionInfo encoded name of the region
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
    final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException {
  HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
  Path tablePath = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableDescriptor.getTableName());
  FileSystem fs = tablePath.getFileSystem(conf);
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);

  for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
    Collection<StoreFileInfo> candidates = regionFs.getStoreFiles(family.getNameAsString());
    if (candidates == null) {
      continue;
    }
    for (StoreFileInfo candidate : candidates) {
      blocksDistribution.add(candidate.computeHDFSBlocksDistribution(fs));
    }
  }
  return blocksDistribution;
}
 
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:26,代码来源:HRegion.java

示例9: getHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * Aggregates the HDFS block distribution of every store file currently held
 * by this region's stores.
 * @return the combined HDFS blocks distribution for the region
 */
@Override public HDFSBlocksDistribution getHDFSBlocksDistribution() {
  HDFSBlocksDistribution aggregate = new HDFSBlocksDistribution();
  // Guard iteration over the shared stores map.
  synchronized (this.stores) {
    for (Store store : this.stores.values()) {
      Collection<StoreFile> files = store.getStorefiles();
      if (files == null) {
        continue;
      }
      for (StoreFile storeFile : files) {
        aggregate.add(storeFile.getHDFSBlockDistribution());
      }
    }
  }
  return aggregate;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:15,代码来源:HRegion.java

示例10: getHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This function will return the HDFS blocks distribution based on the data captured when HFile is
 * created
 * @return The HDFS blocks distribution for the region.
 */
public HDFSBlocksDistribution getHDFSBlocksDistribution() {
  HDFSBlocksDistribution total = new HDFSBlocksDistribution();
  // Guard iteration over the shared stores map.
  synchronized (this.stores) {
    for (Store store : this.stores.values()) {
      for (StoreFile storeFile : store.getStorefiles()) {
        total.add(storeFile.getHDFSBlockDistribution());
      }
    }
  }
  return total;
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:18,代码来源:HRegion.java

示例11: getHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This function will return the HDFS blocks distribution based on the data
 * captured when HFile is created
 *
 * @return The HDFS blocks distribution for the region.
 */
public HDFSBlocksDistribution getHDFSBlocksDistribution() {
    HDFSBlocksDistribution summary = new HDFSBlocksDistribution();
    // Guard iteration over the shared stores map.
    synchronized (this.stores) {
        for (Store store : this.stores.values()) {
            for (StoreFile storeFile : store.getStorefiles()) {
                summary.add(storeFile.getHDFSBlockDistribution());
            }
        }
    }
    return summary;
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:21,代码来源:HRegion.java

示例12: getHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This function will return the HDFS blocks distribution based on the data
 * captured when HFile is created
 * @return The HDFS blocks distribution for the region.
 */
public HDFSBlocksDistribution getHDFSBlocksDistribution() {
  HDFSBlocksDistribution regionWide = new HDFSBlocksDistribution();
  // Guard iteration over the shared stores map.
  synchronized (this.stores) {
    for (Store store : this.stores.values()) {
      for (StoreFile storeFile : store.getStorefiles()) {
        regionWide.add(storeFile.getHDFSBlockDistribution());
      }
    }
  }
  return regionWide;
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:20,代码来源:HRegion.java

示例13: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 * @param conf configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionEncodedName encoded name of the region
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException if the filesystem cannot be reached or a store directory listing fails
 */
static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
    Configuration conf, HTableDescriptor tableDescriptor,
    String regionEncodedName) throws IOException {
  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf),
      tableDescriptor.getName());
  FileSystem fs = tablePath.getFileSystem(conf);

  for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
    Path storeHomeDir = Store.getStoreHomedir(tablePath, regionEncodedName,
        family.getName());
    // A family may not have a store directory yet; skip it rather than fail.
    if (!fs.exists(storeHomeDir)) continue;

    FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);
    for (FileStatus hfileStatus : hfilesStatus) {
      HDFSBlocksDistribution storeFileBlocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
              hfileStatus.getLen());
      hdfsBlocksDistribution.add(storeFileBlocksDistribution);
    }
  }
  return hdfsBlocksDistribution;
}
 
开发者ID:wanhao,项目名称:IRIndex,代码行数:35,代码来源:HRegion.java

示例14: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 * @param conf configuration
 * @param tableDescriptor TableDescriptor of the table
 * @param regionInfo encoded name of the region
 * @param tablePath the table directory
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException
 */
public static HDFSBlocksDistribution computeHDFSBlocksDistribution(Configuration conf,
    TableDescriptor tableDescriptor, RegionInfo regionInfo, Path tablePath) throws IOException {
  HDFSBlocksDistribution blocks = new HDFSBlocksDistribution();
  FileSystem fs = tablePath.getFileSystem(conf);
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);

  for (ColumnFamilyDescriptor family : tableDescriptor.getColumnFamilies()) {
    List<LocatedFileStatus> located = HRegionFileSystem
        .getStoreFilesLocatedStatus(regionFs, family.getNameAsString(), true);
    if (located == null) {
      continue;
    }

    for (LocatedFileStatus fileStatus : located) {
      Path filePath = fileStatus.getPath();
      if (StoreFileInfo.isReference(filePath) || HFileLink.isHFileLink(filePath)) {
        // References/links go through StoreFileInfo; plain HFiles below avoid
        // that object creation entirely.
        StoreFileInfo info = new StoreFileInfo(conf, fs, fileStatus);
        blocks.add(info.computeHDFSBlocksDistribution(fs));
      } else if (StoreFileInfo.isHFile(filePath)) {
        // Plain HFile: fold its already-located blocks in directly.
        FSUtils.addToHDFSBlocksDistribution(blocks, fileStatus.getBlockLocations());
      } else {
        throw new IOException("path=" + filePath
            + " doesn't look like a valid StoreFile");
      }
    }
  }
  return blocks;
}
 
开发者ID:apache,项目名称:hbase,代码行数:44,代码来源:HRegion.java

示例15: computeHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; //导入方法依赖的package包/类
/**
 * This is a helper function to compute HDFS block distribution on demand
 * @param conf configuration
 * @param tableDescriptor HTableDescriptor of the table
 * @param regionEncodedName encoded name of the region
 * @return The HDFS blocks distribution for the given region.
 * @throws IOException if the filesystem cannot be reached or a store directory listing fails
 */
static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
    Configuration conf, HTableDescriptor tableDescriptor,
    String regionEncodedName) throws IOException {
  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf),
      tableDescriptor.getName());
  FileSystem fs = tablePath.getFileSystem(conf);

  for (HColumnDescriptor family : tableDescriptor.getFamilies()) {
    Path storeHomeDir = HStore.getStoreHomedir(tablePath, regionEncodedName,
        family.getName());
    // A family may not have a store directory yet; skip it rather than fail.
    if (!fs.exists(storeHomeDir)) continue;

    FileStatus[] hfilesStatus = fs.listStatus(storeHomeDir);
    for (FileStatus hfileStatus : hfilesStatus) {
      HDFSBlocksDistribution storeFileBlocksDistribution =
          FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
              hfileStatus.getLen());
      hdfsBlocksDistribution.add(storeFileBlocksDistribution);
    }
  }
  return hdfsBlocksDistribution;
}
 
开发者ID:daidong,项目名称:DominoHBase,代码行数:35,代码来源:HRegion.java


注:本文中的org.apache.hadoop.hbase.HDFSBlocksDistribution.add方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。