

Java HDFSBlocksDistribution.getUniqueBlocksTotalWeight Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.HDFSBlocksDistribution.getUniqueBlocksTotalWeight. If you are wondering what HDFSBlocksDistribution.getUniqueBlocksTotalWeight does or how to call it, the selected code examples below may help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.HDFSBlocksDistribution.


The sections below show 9 code examples of the HDFSBlocksDistribution.getUniqueBlocksTotalWeight method, sorted by popularity by default.
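Before the examples, here is a minimal, self-contained sketch of the typical call pattern. It is not taken from any of the projects below; the class and method names are illustrative, and the signature of HRegion.computeHDFSBlocksDistribution is assumed to follow the HBase 2.x form used in Example 8 (apache/hbase).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;

public class UniqueBlocksWeightSketch {

  // Computes a region's HDFS block distribution and returns the total size of its
  // unique blocks, i.e. each block counted once regardless of its replication factor.
  static long uniqueBlocksWeight(Configuration conf, TableDescriptor tableDesc,
      RegionInfo regionInfo) throws IOException {
    // Same static helper the load balancer uses (see Examples 3 and 8 below).
    HDFSBlocksDistribution distribution =
        HRegion.computeHDFSBlocksDistribution(conf, tableDesc, regionInfo);
    return distribution.getUniqueBlocksTotalWeight();
  }
}

A returned weight of 0 indicates an empty region with no HFile blocks, which is why several examples below skip such regions before comparing top hosts or localities.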

Example 1: testInternalGetTopBlockLocation

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test
public void testInternalGetTopBlockLocation() throws Exception {
  for (int i = 0; i < ServerNum; i++) {
    HRegionServer server = cluster.getRegionServer(i);
    for (Region region : server.getOnlineRegions(tableName)) {
      // get the region's HDFS block distribution both from the region itself and from
      // RegionLocationFinder; they should yield the same result
      HDFSBlocksDistribution blocksDistribution1 = region.getHDFSBlocksDistribution();
      HDFSBlocksDistribution blocksDistribution2 = finder.getBlockDistribution(region
          .getRegionInfo());
      assertEquals(blocksDistribution1.getUniqueBlocksTotalWeight(),
        blocksDistribution2.getUniqueBlocksTotalWeight());
      if (blocksDistribution1.getUniqueBlocksTotalWeight() != 0) {
        assertEquals(blocksDistribution1.getTopHosts().get(0), blocksDistribution2.getTopHosts()
            .get(0));
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 20, Source: TestRegionLocationFinder.java

Example 2: testInternalGetTopBlockLocation

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test
public void testInternalGetTopBlockLocation() throws Exception {
  for (int i = 0; i < ServerNum; i++) {
    HRegionServer server = cluster.getRegionServer(i);
    for (HRegion region : server.getRegions(tableName)) {
      // get the region's HDFS block distribution both from the region itself and from
      // RegionLocationFinder; they should yield the same result
      HDFSBlocksDistribution blocksDistribution1 = region.getHDFSBlocksDistribution();
      HDFSBlocksDistribution blocksDistribution2 = finder.getBlockDistribution(region
          .getRegionInfo());
      assertEquals(blocksDistribution1.getUniqueBlocksTotalWeight(),
        blocksDistribution2.getUniqueBlocksTotalWeight());
      if (blocksDistribution1.getUniqueBlocksTotalWeight() != 0) {
        assertEquals(blocksDistribution1.getTopHosts().get(0), blocksDistribution2.getTopHosts()
            .get(0));
      }
    }
  }
}
 
Developer: apache, Project: hbase, Lines of code: 20, Source: TestRegionLocationFinder.java

Example 3: getLowestLocalityRegionOnServer

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
int getLowestLocalityRegionOnServer(int serverIndex) {
  if (regionFinder != null) {
    float lowestLocality = 1.0f;
    int lowestLocalityRegionIndex = -1;
    if (regionsPerServer[serverIndex].length == 0) {
      // No regions on that region server
      return -1;
    }
    for (int j = 0; j < regionsPerServer[serverIndex].length; j++) {
      int regionIndex = regionsPerServer[serverIndex][j];
      HDFSBlocksDistribution distribution = regionFinder
          .getBlockDistribution(regions[regionIndex]);
      float locality = distribution.getBlockLocalityIndex(servers[serverIndex].getHostname());
      // skip empty region
      if (distribution.getUniqueBlocksTotalWeight() == 0) {
        continue;
      }
      if (locality < lowestLocality) {
        lowestLocality = locality;
        lowestLocalityRegionIndex = j;
      }
    }
    if (lowestLocalityRegionIndex == -1) {
      return -1;
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("Lowest locality region is "
          + regions[regionsPerServer[serverIndex][lowestLocalityRegionIndex]]
              .getRegionNameAsString() + " with locality " + lowestLocality
          + " and its region server contains " + regionsPerServer[serverIndex].length
          + " regions");
    }
    return regionsPerServer[serverIndex][lowestLocalityRegionIndex];
  } else {
    return -1;
  }
}
 
Developer: apache, Project: hbase, Lines of code: 38, Source: BaseLoadBalancer.java

Example 4: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file in to more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across the
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have total of 4 replica of blocks on 3 datanodes; thus there
    // must be at least one host that have replica for 2 HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 72, Source: TestHRegion.java

Example 5: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final int DEFAULT_BLOCK_SIZE = 1024;
  htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);


  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte [][] families = {fam1, fam2};
    HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    //Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().
        getRegions(Bytes.toBytes(this.getName())).get(0);
    firstRegion.flushcache();
    HDFSBlocksDistribution blocksDistribution1 =
        firstRegion.getHDFSBlocksDistribution();

    // given the default replication factor is 2 and we have 2 HFiles,
    // we will have total of 4 replica of blocks on 3 datanodes; thus there
    // must be at least one host that have replica for 2 HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 =
        blocksDistribution1.getUniqueBlocksTotalWeight();

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    assertTrue(uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 =
      HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(),
      firstRegion.getTableDesc(),
      firstRegion.getRegionInfo().getEncodedName());
    long uniqueBlocksWeight2 =
      blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 62, Source: TestHRegion.java

Example 6: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file in to more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across the
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flushcache();
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have total of 4 replica of blocks on 3 datanodes; thus there
    // must be at least one host that have replica for 2 HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: grokcoder, Project: pbase, Lines of code: 72, Source: TestHRegion.java

Example 7: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  final int DEFAULT_BLOCK_SIZE = 1024;
  htu.getConfiguration().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flushcache();
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // given the default replication factor is 2 and we have 2 HFiles,
    // we will have total of 4 replica of blocks on 3 datanodes; thus there
    // must be at least one host that have replica for 2 HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    assertTrue(uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: tenggyut, Project: HIndex, Lines of code: 57, Source: TestHRegion.java

Example 8: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file in to more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across the
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    Table ht = htu.createTable(tableName, families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.addColumn(fam1, col, 1, Bytes.toBytes("test1"));
    put.addColumn(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(tableName).get(0);
    firstRegion.flush(true);
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have total of 4 replica of blocks on 3 datanodes; thus there
    // must be at least one host that have replica for 2 HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDescriptor(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: apache, Project: hbase, Lines of code: 71, Source: TestHRegion.java

Example 9: testgetHDFSBlocksDistribution

import org.apache.hadoop.hbase.HDFSBlocksDistribution; // import the package/class this method depends on
@Test
public void testgetHDFSBlocksDistribution() throws Exception {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  // Why do we set the block size in this test?  If we set it smaller than the kvs, then we'll
  // break up the file in to more pieces that can be distributed across the three nodes and we
  // won't be able to have the condition this test asserts; that at least one node has
  // a copy of all replicas -- if small block size, then blocks are spread evenly across the
  // the three nodes.  hfilev3 with tags seems to put us over the block size.  St.Ack.
  // final int DEFAULT_BLOCK_SIZE = 1024;
  // htu.getConfiguration().setLong("dfs.blocksize", DEFAULT_BLOCK_SIZE);
  htu.getConfiguration().setInt("dfs.replication", 2);

  // set up a cluster with 3 nodes
  MiniHBaseCluster cluster = null;
  String dataNodeHosts[] = new String[] { "host1", "host2", "host3" };
  int regionServersCount = 3;

  try {
    cluster = htu.startMiniCluster(1, regionServersCount, dataNodeHosts);
    byte[][] families = { fam1, fam2 };
    HTable ht = htu.createTable(Bytes.toBytes(this.getName()), families);

    // Setting up region
    byte row[] = Bytes.toBytes("row1");
    byte col[] = Bytes.toBytes("col1");

    Put put = new Put(row);
    put.add(fam1, col, 1, Bytes.toBytes("test1"));
    put.add(fam2, col, 1, Bytes.toBytes("test2"));
    ht.put(put);

    HRegion firstRegion = htu.getHBaseCluster().getRegions(TableName.valueOf(this.getName()))
        .get(0);
    firstRegion.flushcache();
    HDFSBlocksDistribution blocksDistribution1 = firstRegion.getHDFSBlocksDistribution();

    // Given the default replication factor is 2 and we have 2 HFiles,
    // we will have total of 4 replica of blocks on 3 datanodes; thus there
    // must be at least one host that have replica for 2 HFiles. That host's
    // weight will be equal to the unique block weight.
    long uniqueBlocksWeight1 = blocksDistribution1.getUniqueBlocksTotalWeight();
    StringBuilder sb = new StringBuilder();
    for (String host: blocksDistribution1.getTopHosts()) {
      if (sb.length() > 0) sb.append(", ");
      sb.append(host);
      sb.append("=");
      sb.append(blocksDistribution1.getWeight(host));
    }

    String topHost = blocksDistribution1.getTopHosts().get(0);
    long topHostWeight = blocksDistribution1.getWeight(topHost);
    String msg = "uniqueBlocksWeight=" + uniqueBlocksWeight1 + ", topHostWeight=" +
      topHostWeight + ", topHost=" + topHost + "; " + sb.toString();
    LOG.info(msg);
    assertTrue(msg, uniqueBlocksWeight1 == topHostWeight);

    // use the static method to compute the value, it should be the same.
    // static method is used by load balancer or other components
    HDFSBlocksDistribution blocksDistribution2 = HRegion.computeHDFSBlocksDistribution(
        htu.getConfiguration(), firstRegion.getTableDesc(), firstRegion.getRegionInfo());
    long uniqueBlocksWeight2 = blocksDistribution2.getUniqueBlocksTotalWeight();

    assertTrue(uniqueBlocksWeight1 == uniqueBlocksWeight2);

    ht.close();
  } finally {
    if (cluster != null) {
      htu.shutdownMiniCluster();
    }
  }
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines of code: 72, Source: TestHRegion.java


Note: The org.apache.hadoop.hbase.HDFSBlocksDistribution.getUniqueBlocksTotalWeight method examples in this article were compiled by 纯净天空 from open source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open source projects contributed by various developers, and the source code copyright belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not republish without permission.