

Java Store.getStoreHomedir Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.Store.getStoreHomedir. If you are unsure what Store.getStoreHomedir does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.hbase.regionserver.Store.


Nine code examples of Store.getStoreHomedir are shown below, ordered by popularity.
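For orientation before the examples: in the 0.94-era API shown here, getStoreHomedir simply composes the on-disk store directory <tabledir>/<encodedRegionName>/<family> from its arguments. Below is a minimal sketch of that behavior; the table path, encoded region name, and family are hypothetical placeholders.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreHomedirSketch {
  public static void main(String[] args) {
    // Hypothetical inputs: a table directory under the HBase root dir,
    // a region's encoded name, and a column family name.
    Path tabledir = new Path("/hbase/mytable");
    String encodedRegionName = "5dd158c2e2d2d2b7cb1c90cdaa42c7f0";
    byte[] family = Bytes.toBytes("f");

    Path storedir = Store.getStoreHomedir(tabledir, encodedRegionName, family);
    System.out.println(storedir);
    // Expected: /hbase/mytable/5dd158c2e2d2d2b7cb1c90cdaa42c7f0/f
  }
}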

Example 1: createReferences

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
/**
 * Creates a reference file under the first column family of the given
 * daughter region, pointing back into the parent region's keyspace.
 * @param services Master services instance.
 * @param htd Descriptor of the table being split.
 * @param parent Parent (pre-split) region.
 * @param daughter Daughter region to write the reference under.
 * @param midkey The key at which the parent region was split.
 * @param top True if we are to write a 'top' reference.
 * @return Path to reference we created.
 * @throws IOException
 */
private Path createReferences(final MasterServices services,
    final HTableDescriptor htd, final HRegionInfo parent,
    final HRegionInfo daughter, final byte [] midkey, final boolean top)
throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
  Path storedir = Store.getStoreHomedir(tabledir, daughter.getEncodedName(),
    htd.getColumnFamilies()[0].getName());
  Reference ref = new Reference(midkey,
    top? Reference.Range.top: Reference.Range.bottom);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: TestCatalogJanitor.java
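Note the name composed for the reference file above: <storefile-name>.<parent-region-encoded-name>, with the current timestamp standing in for a real store file name. As the inline comment suggests, this is the format that StoreFile's REF_NAME_PARSER later splits apart to recover the parent region's encoded name.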

Example 2: checkDaughterInFs

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
/**
 * Checks if a daughter region -- either splitA or splitB -- still holds
 * references to parent.
 * @param parent Parent region.
 * @param split Daughter region to check, splitA or splitB.
 * @param qualifier Catalog column qualifier naming which daughter this is,
 * SPLITA_QUALIFIER or SPLITB_QUALIFIER.
 * @return A pair where the first boolean says whether or not the daughter
 * region directory exists in the filesystem and the second boolean says
 * whether the daughter has references to the parent.
 * @throws IOException
 */
Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent,
  final HRegionInfo split,
  final byte [] qualifier)
throws IOException {
  boolean references = false;
  boolean exists = false;
  if (split == null)  {
    return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
  }
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = new Path(rootdir, split.getTableNameAsString());
  Path regiondir = new Path(tabledir, split.getEncodedName());
  exists = fs.exists(regiondir);
  if (!exists) {
    LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
    return new Pair<Boolean, Boolean>(exists, Boolean.FALSE);
  }
  HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());

  for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
    Path p = Store.getStoreHomedir(tabledir, split.getEncodedName(),
      family.getName());
    if (!fs.exists(p)) continue;
    // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
    FileStatus [] ps = FSUtils.listStatus(fs, p,
        new PathFilter () {
          public boolean accept(Path path) {
            return StoreFile.isReference(path);
          }
        }
    );

    if (ps != null && ps.length > 0) {
      references = true;
      break;
    }
  }
  return new Pair<Boolean, Boolean>(Boolean.valueOf(exists),
    Boolean.valueOf(references));
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: CatalogJanitor.java
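For context, CatalogJanitor.cleanParent only removes a parent region once neither daughter still references it. Here is a simplified sketch of how the returned pair could be interpreted at the call site; the method name canCleanParent is illustrative, not part of the class, and since checkDaughterInFs is package-private this would live inside CatalogJanitor itself (Pair and HConstants come from the usual org.apache.hadoop.hbase imports).

boolean canCleanParent(HRegionInfo parent, HRegionInfo splita, HRegionInfo splitb)
    throws IOException {
  Pair<Boolean, Boolean> a = checkDaughterInFs(parent, splita, HConstants.SPLITA_QUALIFIER);
  Pair<Boolean, Boolean> b = checkDaughterInFs(parent, splitb, HConstants.SPLITB_QUALIFIER);
  // A daughter blocks cleanup only if its directory exists AND it still
  // holds reference files pointing back at the parent.
  boolean aBlocks = a.getFirst() && a.getSecond();
  boolean bBlocks = b.getFirst() && b.getSecond();
  return !aBlocks && !bBlocks;
}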

Example 3: testCleanParent

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
@Test
public void testCleanParent() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  try {
    MasterServices services = new MockMasterServices(server);
    CatalogJanitor janitor = new CatalogJanitor(server, services);
    // Create regions.
    HTableDescriptor htd = new HTableDescriptor("table");
    htd.addFamily(new HColumnDescriptor("f"));
    HRegionInfo parent =
      new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
          Bytes.toBytes("eee"));
    HRegionInfo splita =
      new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"),
          Bytes.toBytes("ccc"));
    HRegionInfo splitb =
      new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"),
          Bytes.toBytes("eee"));
    // Test that when both daughter regions are in place, that we do not
    // remove the parent.
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
        HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
    kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
        HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
    Result r = new Result(kvs);
    // Add a reference under splitA directory so we don't clear out the parent.
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir =
      HTableDescriptor.getTableDir(rootdir, htd.getName());
    Path storedir = Store.getStoreHomedir(tabledir, splita.getEncodedName(),
        htd.getColumnFamilies()[0].getName());
    Reference ref = new Reference(Bytes.toBytes("ccc"), Reference.Range.top);
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    Path path = ref.write(fs, p);
    assertTrue(fs.exists(path));
    assertFalse(janitor.cleanParent(parent, r));
    // Remove the reference file and try again.
    assertTrue(fs.delete(p, true));
    assertTrue(janitor.cleanParent(parent, r));
  } finally {
    server.stop("shutdown");
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: TestCatalogJanitor.java

Example 4: testArchiveOldRegion

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
@Test
public void testArchiveOldRegion() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  // create the janitor
  CatalogJanitor janitor = new CatalogJanitor(server, services);

  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(table);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, that we do not
  // remove the parent.
  List<KeyValue> kvs = new ArrayList<KeyValue>();
  kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
  kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
  Result r = new Result(kvs);

  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // have to set the root directory since we use it in HFileDisposer to figure
  // out how to get to the archive directory. Otherwise, it just seems to pick
  // the first root directory it can find (so the single test passes, but when
  // the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName());
  Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
    htd.getColumnFamilies()[0].getName());

  // delete the file and ensure that the files have been archived
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
    tabledir, htd.getColumnFamilies()[0].getName());

  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  for (FileStatus file : storeFiles) {
    System.out.println("Have store file:" + file.getPath());
  }

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));

  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

  // cleanup
  services.stop("Test finished");
  server.stop("shutdown");
  janitor.join();
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: TestCatalogJanitor.java

Example 5: testDuplicateHFileResolution

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
/**
 * Test that archiving a store file whose name matches an already-archived
 * file causes the existing archived file to be renamed with a timestamped
 * backup suffix.
 */
@Test
public void testDuplicateHFileResolution() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  // create the janitor
  CatalogJanitor janitor = new CatalogJanitor(server, services);

  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(table);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getName(), Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getName(), Bytes.toBytes("ccc"), Bytes.toBytes("eee"));
  // Test that when both daughter regions are in place, that we do not
  // remove the parent.
  List<KeyValue> kvs = new ArrayList<KeyValue>();
  kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITA_QUALIFIER, Writables.getBytes(splita)));
  kvs.add(new KeyValue(parent.getRegionName(), HConstants.CATALOG_FAMILY,
      HConstants.SPLITB_QUALIFIER, Writables.getBytes(splitb)));
  Result r = new Result(kvs);

  FileSystem fs = FileSystem.get(htu.getConfiguration());

  Path rootdir = services.getMasterFileSystem().getRootDir();
  // have to set the root directory since we use it in HFileDisposer to figure
  // out how to get to the archive directory. Otherwise, it just seems to pick
  // the first root directory it can find (so the single test passes, but when
  // the full suite is run, things get borked).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
  Path storedir = Store.getStoreHomedir(tabledir, parent.getEncodedName(),
    htd.getColumnFamilies()[0].getName());
  System.out.println("Old root:" + rootdir);
  System.out.println("Old table:" + tabledir);
  System.out.println("Old store:" + storedir);

  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
    tabledir, htd.getColumnFamilies()[0].getName());
  System.out.println("Old archive:" + storeArchive);

  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));

  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

  // now add store files with the same names as before to check backup
  // enable archiving, make sure that files get archived
  addMockStoreFiles(2, services, storedir);

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, r));

  // and now check to make sure that the files have actually been archived
  archivedStoreFiles = fs.listStatus(storeArchive);
  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs, true);

  // cleanup
  services.stop("Test finished");
  server.stop("shutdown");
  janitor.join();
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: TestCatalogJanitor.java

Example 6: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
/**
 * This test exercises the scenario reported in HBASE-6901:
 * all files are bulk loaded and excluded from minor compaction.
 * Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException
 * is thrown.
 */
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDir("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table, testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: TestHFileOutputFormat.java
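The key knob in this test (and the next one) is hbase.mapreduce.hfileoutputformat.compaction.exclude: when set, HFileOutputFormat marks the HFiles it writes with exclude-from-minor-compaction metadata, so minor compactions skip the bulk-loaded files and only a major compaction rewrites them. That is why the store-file count is expected to drop to one only after majorCompact, while the plain compact call is expected to time out.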

Example 7: testExcludeMinorCompaction

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
@Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  Path testDir = util.getDataTestDir("testExcludeMinorCompaction");
  generateRandomStartKeys(5);

  try {
    util.startMiniCluster();
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HBaseAdmin admin = new HBaseAdmin(conf);
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = Store.getStoreHomedir(
        HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
        FAMILIES[0]);
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME);
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);
    util.startMiniMapReduceCluster();
    runIncrementalPELoad(conf, table, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniMapReduceCluster();
    util.shutdownMiniCluster();
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: TestHFileOutputFormat.java

Example 8: getStoreArchivePath

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
/**
 * Get the directory to archive a store directory
 * @param conf {@link Configuration} to read for the archive directory name. Can be null.
 * @param region parent region information under which the store currently lives
 * @param tabledir directory for the table under which the store currently lives
 * @param family name of the family in the store
 * @return {@link Path} to the directory to archive the given store or <tt>null</tt> if it should
 *         not be archived
 */
public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
    byte[] family) {
  Path tableArchiveDir = getTableArchivePath(tabledir);
  return Store.getStoreHomedir(tableArchiveDir,
    HRegionInfo.encodeRegionName(region.getRegionName()), family);
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: HFileArchiveUtil.java
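The helper keeps the archive tree parallel to the live tree: the same region/family layout is re-created under the table's archive directory. Here is a rough sketch of the resulting paths, assuming the 0.94-era default archive directory name ".archive" under the HBase root (an assumption to verify against your version); all names below are hypothetical.

import org.apache.hadoop.fs.Path;

public class ArchiveLayoutSketch {
  public static void main(String[] args) {
    // Hypothetical live store directory: table "mytable",
    // encoded region name "abc123", family "f".
    Path tabledir = new Path("/hbase/mytable");
    Path store = new Path(tabledir, new Path("abc123", "f"));

    // getStoreArchivePath mirrors that store under the table's archive
    // directory; ".archive" is an assumed default, not read from config here.
    Path tableArchive = new Path(new Path(tabledir.getParent(), ".archive"),
        tabledir.getName());
    Path storeArchive = new Path(tableArchive, new Path("abc123", "f"));

    System.out.println(store);        // /hbase/mytable/abc123/f
    System.out.println(storeArchive); // /hbase/.archive/mytable/abc123/f
  }
}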

Example 9: getStoreSnapshotDirectory

import org.apache.hadoop.hbase.regionserver.Store; // import the class this method depends on
/**
 * Get the home directory for store-level snapshot files.
 * <p>
 * Specific files per store are kept in a similar layout as per the current directory layout.
 * @param regionDir snapshot directory for the parent region, <b>not</b> the standard region
 *          directory. See {@link #getRegionSnapshotDirectory}
 * @param family name of the store to snapshot
 * @return path to the snapshot home directory for the store/family
 */
public static Path getStoreSnapshotDirectory(Path regionDir, String family) {
  return Store.getStoreHomedir(regionDir, Bytes.toBytes(family));
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Source: TakeSnapshotUtils.java
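This last example uses the two-argument overload, which simply appends the family name to whatever parent directory it is given; because snapshots reuse the live directory shape, the same helper serves both layouts. A minimal sketch, assuming the Path, Store, and Bytes imports used in the examples above (the snapshot path is a hypothetical placeholder):

Path regionSnapshotDir = new Path("/hbase/.snapshot/mysnap/abc123"); // hypothetical
Path storeSnapshotDir =
    Store.getStoreHomedir(regionSnapshotDir, Bytes.toBytes("f"));
// -> /hbase/.snapshot/mysnap/abc123/f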


Note: The org.apache.hadoop.hbase.regionserver.Store.getStoreHomedir examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright in the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.