

Java RegionSplitter Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.RegionSplitter. If you are unsure what the RegionSplitter class does, or how and where to use it, the curated code examples below should help.


The RegionSplitter class belongs to the org.apache.hadoop.hbase.util package. Fifteen code examples of the class are shown below, sorted by popularity by default.
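For orientation before the examples, here is a minimal, self-contained sketch of the two built-in split algorithms that recur below (my own illustration against the public HBase API; the printed boundary values assume the default 8-character hex key space):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class RegionSplitterDemo {
  public static void main(String[] args) {
    // HexStringSplit: boundaries are ASCII hex strings, suited to hex-encoded row keys.
    RegionSplitter.SplitAlgorithm hex = new RegionSplitter.HexStringSplit();
    byte[][] hexSplits = hex.split(4); // 4 regions -> 3 boundary keys
    for (byte[] boundary : hexSplits) {
      System.out.println(Bytes.toString(boundary)); // e.g. 40000000, 80000000, c0000000
    }

    // UniformSplit: boundaries are raw bytes, suited to arbitrary binary row keys.
    RegionSplitter.SplitAlgorithm uniform = new RegionSplitter.UniformSplit();
    System.out.println(uniform.split(4).length); // 3
  }
}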

Example 1: createWriteTable

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
private void createWriteTable(int numberOfServers) throws IOException {
  int numberOfRegions = (int)(numberOfServers * regionsLowerLimit);
  LOG.info("Number of live regionservers: " + numberOfServers + ", "
      + "pre-splitting the canary table into " + numberOfRegions + " regions "
      + "(current  lower limi of regions per server is " + regionsLowerLimit
      + " and you can change it by config: "
      + HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY + " )");
  HTableDescriptor desc = new HTableDescriptor(writeTableName);
  HColumnDescriptor family = new HColumnDescriptor(CANARY_TABLE_FAMILY_NAME);
  family.setMaxVersions(1);
  family.setTimeToLive(writeDataTTL);

  desc.addFamily(family);
  byte[][] splits = new RegionSplitter.HexStringSplit().split(numberOfRegions);
  admin.createTable(desc, splits);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: Canary.java

Example 2: createTable

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
private void createTable() throws Exception {
  deleteTable();
  LOG.info("Creating table");
  Configuration conf = util.getConfiguration();
  String encodingKey = String.format(ENCODING_KEY, this.getClass().getSimpleName());
  DataBlockEncoding blockEncoding = DataBlockEncoding.valueOf(conf.get(encodingKey, "FAST_DIFF"));
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  for (byte[] cf : dataGen.getColumnFamilies()) {
    HColumnDescriptor hcd = new HColumnDescriptor(cf);
    hcd.setDataBlockEncoding(blockEncoding);
    htd.addFamily(hcd);
  }
  int serverCount = util.getHBaseClusterInterface().getClusterStatus().getServersSize();
  byte[][] splits = new RegionSplitter.HexStringSplit().split(serverCount * REGIONS_PER_SERVER);
  util.getHBaseAdmin().createTable(htd, splits);
  LOG.info("Created table");
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: IntegrationTestLazyCfLoading.java

Example 3: run

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
@Override
public void run() {
  long startTime, endTime;
  HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
  desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
  SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
  byte[][] splits = algo.split(REGION_COUNT);

  LOG.info(String.format("Creating table %s with %d splits.",
    TABLE_NAME, REGION_COUNT));
  startTime = System.currentTimeMillis();
  try {
    admin.createTable(desc, splits);
    endTime = System.currentTimeMillis();
    success = true;
    LOG.info(String.format("Pre-split table created successfully in %dms.",
      (endTime - startTime)));
  } catch (IOException e) {
    LOG.error("Failed to create table", e);
  } finally {
    doneSignal.countDown();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: IntegrationTestManyRegions.java

Example 4: run

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
@Override
public void run() {
  long startTime, endTime;
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
  desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
  SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
  byte[][] splits = algo.split(REGION_COUNT);

  LOG.info(String.format("Creating table %s with %d splits.",
    TABLE_NAME, REGION_COUNT));
  startTime = System.currentTimeMillis();
  try {
    admin.createTable(desc, splits);
    endTime = System.currentTimeMillis();
    success = true;
    LOG.info(String.format("Pre-split table created successfully in %dms.",
      (endTime - startTime)));
  } catch (IOException e) {
    LOG.error("Failed to create table", e);
  } finally {
    doneSignal.countDown();
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 24, Source: IntegrationTestManyRegions.java

Example 5: createWriteTable

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
private void createWriteTable(int numberOfServers) throws IOException {
  int numberOfRegions = (int)(numberOfServers * regionsLowerLimit);
  LOG.info("Number of live regionservers: " + numberOfServers + ", "
      + "pre-splitting the canary table into " + numberOfRegions + " regions "
      + "(current lower limit of regions per server is " + regionsLowerLimit
      + " and you can change it by config: "
      + HConstants.HBASE_CANARY_WRITE_PERSERVER_REGIONS_LOWERLIMIT_KEY + " )");
  HTableDescriptor desc = new HTableDescriptor(writeTableName);
  HColumnDescriptor family = new HColumnDescriptor(CANARY_TABLE_FAMILY_NAME);
  family.setMaxVersions(1);
  family.setTimeToLive(writeDataTTL);

  desc.addFamily(family);
  byte[][] splits = new RegionSplitter.HexStringSplit().split(numberOfRegions);
  admin.createTable(desc, splits);
}
 
Developer: apache, Project: hbase, Lines: 17, Source: Canary.java

Example 6: setInput

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
/**
 * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
 * @param conf the job to configure
 * @param snapshotName the name of the snapshot to read from
 * @param restoreDir a temporary directory to restore the snapshot into. Current user should
 * have write permissions to this directory, and this should not be a subdirectory of rootdir.
 * After the job is finished, restoreDir can be deleted.
 * @param numSplitsPerRegion how many input splits to generate per one region
 * @param splitAlgo SplitAlgorithm to be used when generating InputSplits
 * @throws IOException if an error occurs
 */
public static void setInput(Configuration conf, String snapshotName, Path restoreDir,
                            RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion)
        throws IOException {
  conf.set(SNAPSHOT_NAME_KEY, snapshotName);
  if (numSplitsPerRegion < 1) {
    throw new IllegalArgumentException("numSplits must be >= 1, " +
            "illegal numSplits : " + numSplitsPerRegion);
  }
  if (splitAlgo == null && numSplitsPerRegion > 1) {
    throw new IllegalArgumentException("Split algo can't be null when numSplits > 1");
  }
  if (splitAlgo != null) {
    conf.set(SPLIT_ALGO, splitAlgo.getClass().getName());
  }
  conf.setInt(NUM_SPLITS_PER_REGION, numSplitsPerRegion);
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);

  restoreDir = new Path(restoreDir, UUID.randomUUID().toString());

  // TODO: restore from record readers to parallelize.
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);

  conf.set(RESTORE_DIR_KEY, restoreDir.toString());
}
 
Developer: apache, Project: hbase, Lines: 37, Source: TableSnapshotInputFormatImpl.java
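As a usage sketch for the method above (hypothetical: the snapshot name and restore path are placeholders, and this assumes the five-argument overload shown in the example), configuring a job to read a snapshot with two input splits per region might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;
import org.apache.hadoop.hbase.util.RegionSplitter;

// Hypothetical caller: "my-snapshot" and the restore path are placeholders.
Configuration conf = HBaseConfiguration.create();
Path restoreDir = new Path("/tmp/snapshot-restore"); // writable, and not under the HBase root dir
TableSnapshotInputFormatImpl.setInput(conf, "my-snapshot", restoreDir,
    new RegionSplitter.UniformSplit(), 2); // generate 2 input splits per region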

Example 7: createTable

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
private void createTable() throws Exception {
  deleteTable();
  LOG.info("Creating table");
  Configuration conf = util.getConfiguration();
  String encodingKey = String.format(ENCODING_KEY, this.getClass().getSimpleName());
  DataBlockEncoding blockEncoding = DataBlockEncoding.valueOf(conf.get(encodingKey, "FAST_DIFF"));
  HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
  for (byte[] cf : dataGen.getColumnFamilies()) {
    HColumnDescriptor hcd = new HColumnDescriptor(cf);
    hcd.setDataBlockEncoding(blockEncoding);
    htd.addFamily(hcd);
  }
  int serverCount = util.getHBaseClusterInterface().getClusterMetrics()
    .getLiveServerMetrics().size();
  byte[][] splits = new RegionSplitter.HexStringSplit().split(serverCount * REGIONS_PER_SERVER);
  util.getAdmin().createTable(htd, splits);
  LOG.info("Created table");
}
 
Developer: apache, Project: hbase, Lines: 19, Source: IntegrationTestLazyCfLoading.java

Example 8: testCreateTableWithRegions

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
@Test
public void testCreateTableWithRegions() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
  desc.addFamily(new HColumnDescriptor("cf"));
  SplitAlgorithm algo = new RegionSplitter.HexStringSplit();
  byte[][] splits = algo.split(REGION_COUNT);

  LOG.info(String.format("Creating table %s with %d splits.", TABLE_NAME, REGION_COUNT));
  long startTime = System.currentTimeMillis();
  try {
    admin.createTable(desc, splits);
    LOG.info(String.format("Pre-split table created successfully in %dms.",
        (System.currentTimeMillis() - startTime)));
  } catch (IOException e) {
    LOG.error("Failed to create table", e);
  }
}
 
Developer: apache, Project: hbase, Lines: 18, Source: IntegrationTestManyRegions.java

Example 9: createTable

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
protected void createTable(HTableDescriptor htd) throws Exception {
  deleteTable();
  if (util.getHBaseClusterInterface() instanceof MiniHBaseCluster) {
    LOG.warn("Test does not make a lot of sense for minicluster. Will set flush size low.");
    htd.setConfiguration(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, "1048576");
  }
  byte[][] splits = new RegionSplitter.HexStringSplit().split(
      util.getHBaseClusterInterface().getClusterStatus().getServersSize());
  util.getHBaseAdmin().createTable(htd, splits);
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: StripeCompactionsPerformanceEvaluation.java

Example 10: createMirroredTableIfNotExists

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
public void createMirroredTableIfNotExists(String hbaseTableName, Integer versions) {
  try {
    if (!DRY_RUN) {
      if (connection == null) {
        connection = ConnectionFactory.createConnection(hbaseConf);
      }

      Admin admin = connection.getAdmin();
      TableName tableName = TableName.valueOf(hbaseTableName);

      if (!admin.tableExists(tableName)) {
        LOGGER.info("table " + hbaseTableName + " does not exist in HBase. Creating...");

        HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
        HColumnDescriptor cd = new HColumnDescriptor("d");
        cd.setMaxVersions(versions);
        tableDescriptor.addFamily(cd);

        // presplit into 16 regions
        RegionSplitter.HexStringSplit splitter = new RegionSplitter.HexStringSplit();
        byte[][] splitKeys = splitter.split(MIRRORED_TABLE_DEFAULT_REGIONS);

        admin.createTable(tableDescriptor, splitKeys);
      }

      knownHBaseTables.put(hbaseTableName, 1);
    }
  } catch (IOException e) {
    LOGGER.info("Failed to create table in HBase.");
    // TODO: wait and retry if failed. After a while set status of applier
    // to 'blocked' & handle by overseer by stopping the replicator
    e.printStackTrace();
  }
}
 
Developer: mysql-time-machine, Project: replicator, Lines: 38, Source: HBaseSchemaManager.java

Example 11: testSplit10_10

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
@Test
public void testSplit10_10() throws Exception {
    int numRegions = 10;
    int cardinality = 10;

    RegionSplitter.SplitAlgorithm splitAlgorithm = new DecimalStringSplit(cardinality);
    byte[][] splits = splitAlgorithm.split(numRegions);
    assertEquals(numRegions - 1, splits.length);

    int digits = 2;
    assertEquals(String.format("%0" + digits + "d", 1), Bytes.toString(splits[0]));
    assertEquals(String.format("%0" + digits + "d", 9), Bytes.toString(splits[numRegions - 2]));
}
 
Developer: kakao, Project: hbase-tools, Lines: 14, Source: DecimalStringSplitTest.java

Example 12: testSplit3_10

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
@Test
public void testSplit3_10() throws Exception {
    int numRegions = 3;
    int cardinality = 10;

    RegionSplitter.SplitAlgorithm splitAlgorithm = new DecimalStringSplit(cardinality);
    byte[][] splits = splitAlgorithm.split(numRegions);
    assertEquals(numRegions - 1, splits.length);

    int digits = 2;
    assertEquals(String.format("%0" + digits + "d", 3), Bytes.toString(splits[0]));
    assertEquals(String.format("%0" + digits + "d", 6), Bytes.toString(splits[numRegions - 2]));
}
 
Developer: kakao, Project: hbase-tools, Lines: 14, Source: DecimalStringSplitTest.java

Example 13: testSplit300_1000

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
@Test
public void testSplit300_1000() throws Exception {
    int numRegions = 300;
    int cardinality = 1000;

    RegionSplitter.SplitAlgorithm splitAlgorithm = new DecimalStringSplit(cardinality);
    byte[][] splits = splitAlgorithm.split(numRegions);
    assertEquals(numRegions - 1, splits.length);

    int digits = 4;
    assertEquals(String.format("%0" + digits + "d", 3), Bytes.toString(splits[0]));
    assertEquals(String.format("%0" + digits + "d", 6), Bytes.toString(splits[1]));
    assertEquals(String.format("%0" + digits + "d", 10), Bytes.toString(splits[2]));
    assertEquals(String.format("%0" + digits + "d", 996), Bytes.toString(splits[numRegions - 2]));
}
 
Developer: kakao, Project: hbase-tools, Lines: 16, Source: DecimalStringSplitTest.java
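The boundaries asserted in these three tests are all consistent with taking the i-th split key to be floor(i * cardinality / numRegions), zero-padded to the decimal width of cardinality. A small sketch reproduces the expected values under that assumption (the formula is inferred from the test expectations above, not quoted from hbase-tools):

// Assumption: boundary formula inferred from the assertions above.
static String expectedBoundary(int i, int numRegions, int cardinality) {
  int digits = String.valueOf(cardinality).length(); // e.g. 1000 -> 4 digits
  long value = (long) i * cardinality / numRegions;  // integer division == floor
  return String.format("%0" + digits + "d", value);
}

// expectedBoundary(1, 300, 1000)   -> "0003"
// expectedBoundary(3, 300, 1000)   -> "0010"
// expectedBoundary(299, 300, 1000) -> "0996"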

Example 14: createPreSplitLoadTestTable

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableDescriptor desc, ColumnFamilyDescriptor[] hcds,
    int numRegionsPerServer) throws IOException {
  return createPreSplitLoadTestTable(conf, desc, hcds,
    new RegionSplitter.HexStringSplit(), numRegionsPerServer);
}
 
Developer: apache, Project: hbase, Lines: 12, Source: HBaseTestingUtility.java
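A hypothetical call site for the overload above (table name, column family, and the per-server region count are placeholders; the descriptor builders follow the HBase 2.x client API):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

Configuration conf = HBaseConfiguration.create();
TableDescriptor desc =
    TableDescriptorBuilder.newBuilder(TableName.valueOf("load_test")).build();
ColumnFamilyDescriptor[] families = { ColumnFamilyDescriptorBuilder.of("cf") };
// Pre-split with HexStringSplit (the default used by this overload), 10 regions per server.
int totalRegions = HBaseTestingUtility.createPreSplitLoadTestTable(conf, desc, families, 10);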

Example 15: testWithMockedMapReduce

import org.apache.hadoop.hbase.util.RegionSplitter; // import the required package/class
@Override
protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
    int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo)
    throws Exception {
  setupCluster();
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    createTableAndSnapshot(
      util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);

    JobConf job = new JobConf(util.getConfiguration());
    // setLocalityEnabledTo is ignored no matter what is specified, so as to test the case that
    // SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY is not explicitly specified
    // and the default value is taken.
    Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);

    if (numSplitsPerRegion > 1) {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
              COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(),
              numSplitsPerRegion);
    } else {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
              COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, false, tmpTableDir);
    }

    // mapred doesn't support start and end keys? o.O
    verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());

  } finally {
    util.getAdmin().deleteSnapshot(snapshotName);
    util.deleteTable(tableName);
    tearDownCluster();
  }
}
 
Developer: apache, Project: hbase, Lines: 37, Source: TestTableSnapshotInputFormat.java


Note: The org.apache.hadoop.hbase.util.RegionSplitter examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and distribution or use should follow each project's license. Do not repost without permission.