

Java HDFSBlocksDistribution.getTopHosts Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.HDFSBlocksDistribution.getTopHosts. If you are wondering what HDFSBlocksDistribution.getTopHosts does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HDFSBlocksDistribution.


The following presents 11 code examples of the HDFSBlocksDistribution.getTopHosts method, sorted by popularity by default.
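Before the examples, here is a minimal, self-contained sketch of the typical call pattern assembled from the snippets below: aggregate the per-file block distributions of the HFiles under a store directory, then read the hosts back in descending weight order via getTopHosts. The class name TopHostsExample and the printTopHosts helper are illustrative assumptions, not part of HBase; the HBase/HDFS calls it uses (FSUtils.listStatus, FSUtils.computeHDFSBlocksDistribution, HDFSBlocksDistribution.add, getTopHosts, getWeight) all appear in the examples that follow.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.util.FSUtils;

public class TopHostsExample {
  /**
   * Print the hosts holding blocks of the HFiles under the given directory,
   * ordered by descending block weight (bytes of HFile data held on that host).
   */
  public static void printTopHosts(Configuration conf, Path storeDir) throws IOException {
    FileSystem fs = storeDir.getFileSystem(conf);
    FileStatus[] files = FSUtils.listStatus(fs, storeDir);
    if (files == null) {
      return; // directory is empty or missing
    }
    HDFSBlocksDistribution distribution = new HDFSBlocksDistribution();
    for (FileStatus file : files) {
      // Aggregate each file's block distribution into a single distribution.
      distribution.add(FSUtils.computeHDFSBlocksDistribution(fs, file, 0, file.getLen()));
    }
    // getTopHosts() returns host names sorted by weight, heaviest first.
    List<String> topHosts = distribution.getTopHosts();
    for (String host : topHosts) {
      System.out.println(host + " => " + distribution.getWeight(host));
    }
  }
}

In the load-balancer examples below, the same top-hosts list is mapped to region server names via mapHostNameToServerName, so the first entry corresponds to the server with the best data locality for the region.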

Example 1: getStoreDirHosts

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
/**
 * return the top hosts of the store files, used by the Split
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  FileStatus[] files = FSUtils.listStatus(fs, path);
  if (files == null) {
    return new String[] {};
  }

  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  for (FileStatus hfileStatus: files) {
    HDFSBlocksDistribution storeFileBlocksDistribution =
      FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
    hdfsBlocksDistribution.add(storeFileBlocksDistribution);
  }

  List<String> hosts = hdfsBlocksDistribution.getTopHosts();
  return hosts.toArray(new String[hosts.size()]);
}
 
Contributor: fengchen8086 | Project: ditb | Lines of code: 21 | Source: CompactionTool.java

Example 2: getTopBlockLocations

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
/**
 * Returns an ordered list of hosts that are hosting the blocks for this
 * region.  The weight of each host is the sum of the block lengths of all
 * files on that host, so the first host in the list is the server which
 * holds the most bytes of the given region's HFiles.
 *
 * @param fs the filesystem
 * @param region region
 * @return ordered list of hosts holding blocks of the specified region
 */
@SuppressWarnings("unused")
private List<ServerName> getTopBlockLocations(FileSystem fs,
  HRegionInfo region) {
  List<ServerName> topServerNames = null;
  try {
    HTableDescriptor tableDescriptor = getTableDescriptor(
      region.getTableName());
    if (tableDescriptor != null) {
      HDFSBlocksDistribution blocksDistribution =
        HRegion.computeHDFSBlocksDistribution(config, tableDescriptor,
        region.getEncodedName());
      List<String> topHosts = blocksDistribution.getTopHosts();
      topServerNames = mapHostNameToServerName(topHosts);
    }
  } catch (IOException ioe) {
    LOG.debug("IOException during HDFSBlocksDistribution computation. for " +
      "region = " + region.getEncodedName() , ioe);
  }
  
  return topServerNames;
}
 
Contributor: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines of code: 32 | Source: DefaultLoadBalancer.java

Example 3: getStoreDirHosts

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
/**
 * return the top hosts of the store files, used by the Split
 */
private static String[] getStoreDirHosts(final FileSystem fs, final Path path)
    throws IOException {
  FileStatus[] files = FSUtils.listStatus(fs, path, null);
  if (files == null) {
    return new String[] {};
  }

  HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
  for (FileStatus hfileStatus: files) {
    HDFSBlocksDistribution storeFileBlocksDistribution =
      FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen());
    hdfsBlocksDistribution.add(storeFileBlocksDistribution);
  }

  List<String> hosts = hdfsBlocksDistribution.getTopHosts();
  return hosts.toArray(new String[hosts.size()]);
}
 
Contributor: fengchen8086 | Project: LCIndex-HBase-0.94.16 | Lines of code: 21 | Source: CompactionTool.java

Example 4: internalGetTopBlockLocation

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
/**
 * Returns an ordered list of hosts that are hosting the blocks for this
 * region. The weight of each host is the sum of the block lengths of all
 * files on that host, so the first host in the list is the server which holds
 * the most bytes of the given region's HFiles.
 *
 * @param region region
 * @return ordered list of hosts holding blocks of the specified region
 */
protected List<ServerName> internalGetTopBlockLocation(HRegionInfo region) {
  List<ServerName> topServerNames = null;
  try {
    HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
    if (tableDescriptor != null) {
      HDFSBlocksDistribution blocksDistribution =
          HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
      List<String> topHosts = blocksDistribution.getTopHosts();
      topServerNames = mapHostNameToServerName(topHosts);
    }
  } catch (IOException ioe) {
    LOG.debug("IOException during HDFSBlocksDistribution computation. for " + "region = "
        + region.getEncodedName(), ioe);
  }

  return topServerNames;
}
 
Contributor: grokcoder | Project: pbase | Lines of code: 27 | Source: RegionLocationFinder.java

Example 5: internalGetTopBlockLocation

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
/**
 * Returns an ordered list of hosts that are hosting the blocks for this
 * region. The weight of each host is the sum of the block lengths of all
 * files on that host, so the first host in the list is the server which holds
 * the most bytes of the given region's HFiles.
 *
 * @param region region
 * @return ordered list of hosts holding blocks of the specified region
 */
protected List<ServerName> internalGetTopBlockLocation(HRegionInfo region) {
  List<ServerName> topServerNames = null;
  try {
    HTableDescriptor tableDescriptor = getTableDescriptor(region.getTableName());
    if (tableDescriptor != null) {
      HDFSBlocksDistribution blocksDistribution =
          HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor,
            region.getEncodedName());
      List<String> topHosts = blocksDistribution.getTopHosts();
      topServerNames = mapHostNameToServerName(topHosts);
    }
  } catch (IOException ioe) {
    LOG.debug("IOException during HDFSBlocksDistribution computation. for " + "region = "
        + region.getEncodedName(), ioe);
  }

  return topServerNames;
}
 
Contributor: daidong | Project: DominoHBase | Lines of code: 28 | Source: RegionLocationFinder.java

Example 6: getTopBlockLocations

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
/**
 * Returns an ordered list of hosts which have better locality for this region
 * than the current host.
 */
protected List<ServerName> getTopBlockLocations(RegionInfo region, String currentHost) {
  HDFSBlocksDistribution blocksDistribution = getBlockDistribution(region);
  List<String> topHosts = new ArrayList<>();
  for (String host : blocksDistribution.getTopHosts()) {
    if (host.equals(currentHost)) {
      break;
    }
    topHosts.add(host);
  }
  return mapHostNameToServerName(topHosts);
}
 
Contributor: apache | Project: hbase | Lines of code: 16 | Source: RegionLocationFinder.java

Example 7: getTopBlockLocations

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
protected List<ServerName> getTopBlockLocations(HRegionInfo region) {
  HDFSBlocksDistribution blocksDistribution = getBlockDistribution(region);
  List<String> topHosts = blocksDistribution.getTopHosts();
  return mapHostNameToServerName(topHosts);
}
 
Contributor: fengchen8086 | Project: ditb | Lines of code: 6 | Source: RegionLocationFinder.java

Example 8: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file into more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have a total of 4 block replicas on 3 datanodes; thus there
    // must be at least one host that has replicas of both HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Contributor: fengchen8086 | Project: ditb | Lines of code: 72 | Source: TestHRegion.java

Example 9: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file into more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flushcache();
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have a total of 4 block replicas on 3 datanodes; thus there
    // must be at least one host that has replicas of both HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Contributor: grokcoder | Project: pbase | Lines of code: 72 | Source: TestHRegion.java

Example 10: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file into more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(tableName, families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.addColumn(fam1, col, 1, Bytes.toBytes("test1"));
    put.addColumn(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have a total of 4 block replicas on 3 datanodes; thus there
    // must be at least one host that has replicas of both HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Contributor: apache | Project: hbase | Lines of code: 71 | Source: TestHRegion.java

Example 11: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the class the method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file in to more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across the
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flushcache();
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have a total of 4 block replicas on 3 datanodes; thus there
    // must be at least one host that has replicas of both HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Contributor: shenli-uiuc | Project: PyroDB | Lines of code: 72 | Source: TestHRegion.java


Note: The org.apache.hadoop.hbase.HDFSBlocksDistribution.getTopHosts examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not reproduce without permission.