

Java HStore.getStoreHomedir Method Code Examples

This article collects typical usage examples of the HStore.getStoreHomedir method from the Java class org.apache.hadoop.hbase.regionserver.HStore. If you are unsure what HStore.getStoreHomedir does or how to call it, the curated examples below may help. You can also explore other usage examples of the org.apache.hadoop.hbase.regionserver.HStore class.


The following presents 15 code examples of the HStore.getStoreHomedir method, ordered by popularity by default.
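Before looking at the project examples, here is a minimal sketch of a typical call. It is based on the overloads visible in the examples below (one takes an HRegionInfo, another an encoded region name string); the class name StoreHomedirSketch, the table name "demo", and the column family "f" are illustrative placeholders, not taken from any of the listed projects.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;

public class StoreHomedirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Resolve the table directory under hbase.rootdir.
    Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf("demo"));
    HRegionInfo region = new HRegionInfo(TableName.valueOf("demo"));
    // getStoreHomedir appends <encoded region name>/<family> to the table directory,
    // yielding the directory that holds that store's HFiles.
    Path storeDir = HStore.getStoreHomedir(tableDir, region, Bytes.toBytes("f"));
    System.out.println("Store home dir: " + storeDir);
  }
}

Judging from how it is used in the examples below, the method only computes a path; callers create the directory themselves when needed.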

Example 1: createReferences

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * @param services Master services instance.
 * @param htd
 * @param parent
 * @param daughter
 * @param midkey
 * @param top True if we are to write a 'top' reference.
 * @return Path to reference we created.
 * @throws IOException
 */
private Path createReferences(final MasterServices services,
    final HTableDescriptor htd, final HRegionInfo parent,
    final HRegionInfo daughter, final byte [] midkey, final boolean top)
throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter,
    htd.getColumnFamilies()[0].getName());
  Reference ref =
    top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestCatalogJanitor.java

Example 2: createReferences

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
private Path createReferences(final MasterServices services,
    final TableDescriptor td, final HRegionInfo parent,
    final HRegionInfo daughter, final byte [] midkey, final boolean top)
throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter,
    td.getColumnFamilies()[0].getName());
  Reference ref =
    top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
 
Developer: apache, Project: hbase, Lines: 18, Source: TestCatalogJanitor.java

Example 3: createReferences

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * @param services Master services instance.
 * @param htd
 * @param parent
 * @param daughter
 * @param midkey
 * @param top True if we are to write a 'top' reference.
 * @return Path to reference we created.
 * @throws IOException
 */
private Path createReferences(final MasterServices services,
    final HTableDescriptor htd, final HRegionInfo parent,
    final HRegionInfo daughter, final byte [] midkey, final boolean top)
throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter.getEncodedName(),
    htd.getColumnFamilies()[0].getName());
  Reference ref =
    top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
 
Developer: daidong, Project: DominoHBase, Lines: 28, Source: TestCatalogJanitor.java

Example 4: getStoreArchivePath

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * Get the directory to archive a store directory
 * @param conf {@link Configuration} to read for the archive directory name.
 * @param region parent region information under which the store currently lives
 * @param tabledir directory for the table under which the store currently lives
 * @param family name of the family in the store
 * @return {@link Path} to the directory to archive the given store or <tt>null</tt> if it should
 *         not be archived
 */
public static Path getStoreArchivePath(Configuration conf,
                                       HRegionInfo region,
                                       Path tabledir,
    byte[] family) throws IOException {
  TableName tableName =
      FSUtils.getTableName(tabledir);
  Path rootDir = FSUtils.getRootDir(conf);
  Path tableArchiveDir = getTableArchivePath(rootDir, tableName);
  return HStore.getStoreHomedir(tableArchiveDir, region, family);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: HFileArchiveUtil.java

Example 5: testCleanParent

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * Test clearing a split parent.
 */
@Test
public void testCleanParent() throws IOException, InterruptedException {
  TableDescriptor td = createTableDescriptorForCurrentMethod();
  // Create regions.
  HRegionInfo parent =
      new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita =
      new HRegionInfo(td.getTableName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb =
      new HRegionInfo(td.getTableName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, that we do not remove the parent.
  Result r = createResult(parent, splita, splitb);
  // Add a reference under splitA directory so we don't clear out the parent.
  Path rootdir = this.masterServices.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, td.getTableName());
  Path parentdir = new Path(tabledir, parent.getEncodedName());
  Path storedir = HStore.getStoreHomedir(tabledir, splita, td.getColumnFamilies()[0].getName());
  Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = this.masterServices.getMasterFileSystem().getFileSystem();
  Path path = ref.write(fs, p);
  assertTrue(fs.exists(path));
  LOG.info("Created reference " + path);
  // Add a parentdir for kicks so can check it gets removed by the catalogjanitor.
  fs.mkdirs(parentdir);
  assertFalse(this.janitor.cleanParent(parent, r));
  ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
  assertTrue(fs.exists(parentdir));
  // Remove the reference file and try again.
  assertTrue(fs.delete(p, true));
  assertTrue(this.janitor.cleanParent(parent, r));
  // Parent cleanup is run async as a procedure. Make sure parentdir is removed.
  ProcedureTestingUtility.waitAllProcedures(masterServices.getMasterProcedureExecutor());
  assertTrue(!fs.exists(parentdir));
}
 
Developer: apache, Project: hbase, Lines: 41, Source: TestCatalogJanitor.java

Example 6: checkDaughterInFs

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * Checks if a daughter region -- either splitA or splitB -- still holds
 * references to parent.
 * @param parent Parent region
 * @param daughter Daughter region
 * @return A pair where the first boolean says whether or not the daughter
 * region directory exists in the filesystem and then the second boolean says
 * whether the daughter has references to the parent.
 * @throws IOException
 */
Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
throws IOException {
  boolean references = false;
  boolean exists = false;
  if (daughter == null)  {
    return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
  }
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = new Path(rootdir, daughter.getTableNameAsString());
  Path regiondir = new Path(tabledir, daughter.getEncodedName());
  exists = fs.exists(regiondir);
  if (!exists) {
    LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
    return new Pair<Boolean, Boolean>(exists, Boolean.FALSE);
  }
  HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());

  for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
    Path p = HStore.getStoreHomedir(tabledir, daughter.getEncodedName(),
      family.getName());
    if (!fs.exists(p)) continue;
    // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
    FileStatus [] ps = FSUtils.listStatus(fs, p,
        new PathFilter () {
          public boolean accept(Path path) {
            return StoreFile.isReference(path);
          }
        }
    );

    if (ps != null && ps.length > 0) {
      references = true;
      break;
    }
  }
  return new Pair<Boolean, Boolean>(Boolean.valueOf(exists),
    Boolean.valueOf(references));
}
 
Developer: daidong, Project: DominoHBase, Lines: 50, Source: CatalogJanitor.java

Example 7: testCleanParent

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
@Test
public void testCleanParent() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  try {
    MasterServices services = new MockMasterServices(server);
    CatalogJanitor janitor = new CatalogJanitor(server, services);
    // Create regions.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent =
      new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
          Bytes.toBytes("eee"));
    HRegionInfo splita =
      new HRegionInfo(htd.getTableName(), Bytes.toBytes("aaa"),
          Bytes.toBytes("ccc"));
    HRegionInfo splitb =
      new HRegionInfo(htd.getTableName(), Bytes.toBytes("ccc"),
          Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, that we do not
    // remove the parent.
    Result r = createResult(parent, splita, splitb);
    // Add a reference under splitA directory so we don't clear out the parent.
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir =
      FSUtils.getTableDir(rootdir, htd.getTableName());
    Path storedir = HStore.getStoreHomedir(tabledir, splita,
        htd.getColumnFamilies()[0].getName());
    Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    Path path = ref.write(fs, p);
    assertTrue(fs.exists(path));
    assertFalse(janitor.cleanParent(parent, r));
    // Remove the reference file and try again.
    assertTrue(fs.delete(p, true));
    assertTrue(janitor.cleanParent(parent, r));
  } finally {
    server.stop("shutdown");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 45, Source: TestCatalogJanitor.java

Example 8: testArchiveOldRegion

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
@Test
public void testArchiveOldRegion() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  // create the janitor
  CatalogJanitor janitor = new CatalogJanitor(server, services);

  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("ccc"),
      Bytes.toBytes("eee"));

  // Test that when both daughter regions are in place, that we do not
  // remove the parent.
  Result parentMetaRow = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // have to set the root directory since we use it in HFileDisposer to figure out to get to the
  // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
  // the single test passes, but when the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
    tabledir, htd.getColumnFamilies()[0].getName());
  LOG.debug("Table dir:" + tabledir);
  LOG.debug("Store dir:" + storedir);
  LOG.debug("Store archive dir:" + storeArchive);

  // add a couple of store files that we can check for
  FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  int index = 0;
  for (FileStatus file : storeFiles) {
    LOG.debug("Have store file:" + file.getPath());
    assertEquals("Got unexpected store file", mockFiles[index].getPath(),
      storeFiles[index].getPath());
    index++;
  }

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, parentMetaRow));
  LOG.debug("Finished cleanup of parent region");

  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  logFiles("archived files", storeFiles);
  logFiles("archived files", archivedStoreFiles);

  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

  // cleanup
  FSUtils.delete(fs, rootdir, true);
  services.stop("Test finished");
  server.stop("Test finished");
  janitor.cancel(true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 69, Source: TestCatalogJanitor.java

Example 9: testDuplicateHFileResolution

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * Test that when a store file with the same name as one already archived is archived again,
 * the previously archived file is renamed with a timestamped backup suffix.
 */
@Test
public void testDuplicateHFileResolution() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  // create the janitor

  CatalogJanitor janitor = new CatalogJanitor(server, services);

  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, that we do not
  // remove the parent.
  Result r = createResult(parent, splita, splitb);

  FileSystem fs = FileSystem.get(htu.getConfiguration());

  Path rootdir = services.getMasterFileSystem().getRootDir();
  // have to set the root directory since we use it in HFileDisposer to figure out to get to the
  // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
  // the single test passes, but when the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  System.out.println("Old root:" + rootdir);
  System.out.println("Old table:" + tabledir);
  System.out.println("Old store:" + storedir);

  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
    tabledir, htd.getColumnFamilies()[0].getName());
  System.out.println("Old archive:" + storeArchive);

  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));

  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

  // now add store files with the same names as before to check backup
  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));

  // and now check to make sure that the files have actually been archived
  archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);

  // cleanup
  services.stop("Test finished");
  server.stop("shutdown");
  janitor.cancel(true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 74, Source: TestCatalogJanitor.java

Example 10: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 72, Source: TestHFileOutputFormat.java

Example 11: testExcludeMinorCompaction

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.setJobWithoutMRCluster();
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getTestFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 76, Source: TestHFileOutputFormat.java

Example 12: testArchiveOldRegion

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
@Test
public void testArchiveOldRegion() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  // create the janitor
  CatalogJanitor janitor = new CatalogJanitor(server, services);

  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("ccc"),
      Bytes.toBytes("eee"));

  // Test that when both daughter regions are in place, that we do not
  // remove the parent.
  Result parentMetaRow = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // have to set the root directory since we use it in HFileDisposer to figure out to get to the
  // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
  // the single test passes, but when the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
    tabledir, htd.getColumnFamilies()[0].getName());
  LOG.debug("Table dir:" + tabledir);
  LOG.debug("Store dir:" + storedir);
  LOG.debug("Store archive dir:" + storeArchive);

  // add a couple of store files that we can check for
  FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  int index = 0;
  for (FileStatus file : storeFiles) {
    LOG.debug("Have store file:" + file.getPath());
    assertEquals("Got unexpected store file", mockFiles[index].getPath(),
      storeFiles[index].getPath());
    index++;
  }

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, parentMetaRow));
  LOG.debug("Finished cleanup of parent region");

  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  logFiles("archived files", storeFiles);
  logFiles("archived files", archivedStoreFiles);

  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

  // cleanup
  FSUtils.delete(fs, rootdir, true);
  services.stop("Test finished");
  server.stop("Test finished");
  janitor.join();
}
 
Developer: grokcoder, Project: pbase, Lines: 69, Source: TestCatalogJanitor.java

Example 13: testDuplicateHFileResolution

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * Test that when a store file with the same name as one already archived is archived again,
 * the previously archived file is renamed with a timestamped backup suffix.
 */
@Test
public void testDuplicateHFileResolution() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  // create the janitor

  CatalogJanitor janitor = new CatalogJanitor(server, services);

  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, that we do not
  // remove the parent.
  Result r = createResult(parent, splita, splitb);

  FileSystem fs = FileSystem.get(htu.getConfiguration());

  Path rootdir = services.getMasterFileSystem().getRootDir();
  // have to set the root directory since we use it in HFileDisposer to figure out to get to the
  // archive directory. Otherwise, it just seems to pick the first root directory it can find (so
  // the single test passes, but when the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  System.out.println("Old root:" + rootdir);
  System.out.println("Old table:" + tabledir);
  System.out.println("Old store:" + storedir);

  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
    tabledir, htd.getColumnFamilies()[0].getName());
  System.out.println("Old archive:" + storeArchive);

  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));

  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

  // now add store files with the same names as before to check backup
  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));

  // and now check to make sure that the files have actually been archived
  archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);

  // cleanup
  services.stop("Test finished");
  server.stop("shutdown");
  janitor.join();
}
 
Developer: grokcoder, Project: pbase, Lines: 74, Source: TestCatalogJanitor.java

Example 14: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
/**
 * This test is to test the scenario happened in HBASE-6901.
 * All files are bulk loaded and excluded from minor compaction.
 * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException
 * will be thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 73, Source: TestHFileOutputFormat.java

Example 15: testExcludeMinorCompaction

import org.apache.hadoop.hbase.regionserver.HStore; // import the package/class this method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = HStore.getStoreHomedir(
        FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME.getName());
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME.getName());
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME.getName());
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 77, Source: TestHFileOutputFormat.java


Note: The org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. Please consult the license of the corresponding project before redistributing or using the code. Do not reproduce this article without permission.