Java FSUtils.getTableDir Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.FSUtils.getTableDir. If you have been wondering how FSUtils.getTableDir works, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.FSUtils.


The following presents 15 code examples of the FSUtils.getTableDir method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
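Before diving into the examples, here is a minimal, self-contained sketch of the call itself, assuming an HBase 1.x classpath with hbase-site.xml available. The table name is a hypothetical placeholder; FSUtils.getRootDir resolves the HBase root directory (hbase.rootdir), the same way several of the examples below do.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class GetTableDirSketch {
  public static void main(String[] args) throws Exception {
    // Assumes hbase-site.xml (defining hbase.rootdir) is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);

    // Hypothetical table name, for illustration only.
    TableName tableName = TableName.valueOf("default", "my_table");

    // getTableDir resolves the table's directory under the given root,
    // i.e. <hbase.rootdir>/data/<namespace>/<table> in the 1.x layout.
    Path tableDir = FSUtils.getTableDir(rootDir, tableName);
    System.out.println("Table directory: " + tableDir);
  }
}

All fifteen examples below follow this same pattern: obtain a root (or temp/restore) directory, then derive the table directory from it.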

Example 1: deleteFamilyFromFS

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

  // delete the family folder
  Path familyDir = new Path(tableDir,
    new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (!fs.delete(familyDir, true)) {
    if (fs.exists(familyDir)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: MasterFileSystem.java

Example 2: cleanMergeRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * If the merged region no longer holds references to the regions it was merged
 * from, archive those merging regions on HDFS and delete the merge references
 * from hbase:meta.
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we deleted the merge references from hbase:meta and archived
 *         the merging regions' files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
      mergedRegion);
    return true;
  }
  return false;
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: CatalogJanitor.java

Example 3: validateColumnFamilyDeletion

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
    final String family) throws IOException {
  // verify htd
  HTableDescriptor htd = master.getTableDescriptors().get(tableName);
  assertTrue(htd != null);
  assertFalse(htd.hasFamily(family.getBytes()));

  // verify fs
  final FileSystem fs = master.getMasterFileSystem().getFileSystem();
  final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
  for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
    final Path familyDir = new Path(regionDir, family);
    assertFalse(family + " family dir should not exist", fs.exists(familyDir));
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: MasterProcedureTestingUtility.java

Example 4: testReplayWorksThoughLotsOfFlushing

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask.
 * Create a region. Close it. Then copy into place a file to replay, one that is bigger than
 * configured flush size so we bring on lots of flushes.  Then reopen and confirm all edits
 * made it in.
 * @throws IOException
 */
@Test (timeout=60000)
public void testReplayWorksThoughLotsOfFlushing() throws IOException {
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  // Set it so we flush every 1M or so. That's a lot.
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
  // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
  final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(testName.getMethodName()));
  final String columnFamily = "meta";
  byte [][] columnFamilyAsByteArray = new byte [][] {Bytes.toBytes(columnFamily)};
  htd.addFamily(new HColumnDescriptor(columnFamily));
  HRegionInfo hri = new HRegionInfo(htd.getTableName()) {
    @Override
    public synchronized String getEncodedName() {
      return encodedRegionName;
    }

    // Cache the name because lots of lookups.
    private byte [] encodedRegionNameAsBytes = null;
    @Override
    public synchronized byte[] getEncodedNameAsBytes() {
      if (encodedRegionNameAsBytes == null) {
        this.encodedRegionNameAsBytes = Bytes.toBytes(getEncodedName());
      }
      return this.encodedRegionNameAsBytes;
    }
  };
  Path hbaseRootDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
  HRegionFileSystem hrfs =
      new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
  if (fs.exists(hrfs.getRegionDir())) {
    LOG.info("Region directory already exists. Deleting.");
    fs.delete(hrfs.getRegionDir(), true);
  }
  HRegion region = HRegion.createHRegion(hri, hbaseRootDir, conf, htd, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // There should be no store files.
  assertTrue(storeFiles.isEmpty());
  region.close();
  Path regionDir = region.getRegionDir(hbaseRootDir, hri);
  Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
  // This is a little fragile getting this path to a file of 10M of edits.
  Path recoveredEditsFile = new Path(
    System.getProperty("test.build.classes", "target/test-classes"),
      "0000000000000016310");
  // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
  Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
  fs.copyToLocalFile(recoveredEditsFile, destination);
  assertTrue(fs.exists(destination));
  // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay.
  region = HRegion.openHRegion(region, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Let's assume that if
  // we flush at 1MB, that there are at least 3 flushed files that are there because of the
  // replay of edits.
  assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
  // Now verify all edits made it into the region.
  int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
  LOG.info("Checked " + count + " edits made it in");
}
 
Developer: fengchen8086, Project: ditb, Lines: 73, Source: TestRecoveredEdits.java

Example 5: init

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@SuppressWarnings("deprecation")
private Store init(String methodName, Configuration conf, HTableDescriptor htd,
    HColumnDescriptor hcd) throws IOException {
  //Setting up a Store
  Path basedir = new Path(DIR+methodName);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(methodName));

  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  if (htd.hasFamily(hcd.getName())) {
    htd.modifyFamily(hcd);
  } else {
    htd.addFamily(hcd);
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  final WALFactory wals = new WALFactory(walConf, null, methodName);
  HRegion region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf,
      info, htd, null);

  store = new HStore(region, hcd, conf);
  return store;
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestStore.java

Example 6: getSplits

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public static List<InputSplit> getSplits(Scan scan, SnapshotManifest manifest,
    List<HRegionInfo> regionManifests, Path restoreDir, Configuration conf) throws IOException {
  // load table descriptor
  HTableDescriptor htd = manifest.getTableDescriptor();

  Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());

  List<InputSplit> splits = new ArrayList<InputSplit>();
  for (HRegionInfo hri : regionManifests) {
    // load region descriptor

    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), hri.getStartKey(),
        hri.getEndKey())) {
      // compute HDFS locations from snapshot files (which will get the locations for
      // referred hfiles)
      List<String> hosts = getBestLocations(conf,
          HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir));

      int len = Math.min(3, hosts.size());
      hosts = hosts.subList(0, len);
      splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir));
    }
  }

  return splits;

}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TableSnapshotInputFormatImpl.java

Example 7: testCreateTableWithSplitRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test (timeout=300000)
public void testCreateTableWithSplitRegion() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTableWithSplitRegion");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  byte[] splitPoint = Bytes.toBytes("split-point");
  long ts = System.currentTimeMillis();
  HRegionInfo d1 = new HRegionInfo(desc.getTableName(), null, splitPoint, false, ts);
  HRegionInfo d2 = new HRegionInfo(desc.getTableName(), splitPoint, null, false, ts + 1);
  HRegionInfo parent = new HRegionInfo(desc.getTableName(), null, null, true, ts + 2);
  parent.setOffline(true);

  Path tempdir = m.getMasterFileSystem().getTempDir();
  FileSystem fs = m.getMasterFileSystem().getFileSystem();
  Path tempTableDir = FSUtils.getTableDir(tempdir, desc.getTableName());
  fs.delete(tempTableDir, true); // Clean up temp table dir if exists

  final HRegionInfo[] hRegionInfos = new HRegionInfo[] {d1, d2, parent};
  CreateTableHandler handler = new CreateTableHandler(m, m.getMasterFileSystem(),
    desc, cluster.getConfiguration(), hRegionInfos, m);
  handler.prepare();
  handler.process();
  for (int i = 0; i < 100; i++) {
    if (!TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName)) {
      Thread.sleep(300);
    }
  }
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableEnabled(tableName));
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName));
  assertTrue(TEST_UTIL.getHBaseAdmin().isTableAvailable(tableName, new byte[][] { splitPoint }));
  RegionStates regionStates = m.getAssignmentManager().getRegionStates();
  assertTrue("Parent should be in SPLIT state",
    regionStates.isRegionInState(parent, State.SPLIT));
}
 
Developer: fengchen8086, Project: ditb, Lines: 37, Source: TestCreateTableHandler.java

Example 8: getFamiliesFromFS

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Set<String> families = new HashSet<String>();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) {
    for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) {
      families.add(familyDir.getName());
    }
  }
  return families;
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: TestRestoreSnapshotFromClient.java

Example 9: setupTable

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
private void setupTable(final TableName tableName) throws IOException {
  // load the table
  Table table = UTIL.createTable(tableName, FAMILY_NAME);
  try {
    rowCount = 0;
    byte[] value = new byte[1024];
    byte[] q = Bytes.toBytes("q");
    while (rowCount < NUM_ROWS) {
      Put put = new Put(Bytes.toBytes(String.format("%010d", rowCount)));
      put.setDurability(Durability.SKIP_WAL);
      put.add(FAMILY_NAME, q, value);
      table.put(put);

      if ((rowCount++ % ROW_PER_FILE) == 0) {
        // flush it
        ((HTable)table).flushCommits();
        UTIL.getHBaseAdmin().flush(tableName);
      }
    }
  } finally {
    UTIL.getHBaseAdmin().flush(tableName);
    table.close();
  }

  assertEquals(NUM_ROWS, rowCount);

  // get the store file paths
  storeFiles.clear();
  tableDir = FSUtils.getTableDir(getRootDir(), tableName);
  FSVisitor.visitTableStoreFiles(getFileSystem(), tableDir, new FSVisitor.StoreFileVisitor() {
    @Override
    public void storeFile(final String region, final String family, final String hfile)
        throws IOException {
      HFileLink link = HFileLink.build(UTIL.getConfiguration(), tableName, region, family, hfile);
      storeFiles.add(link.getOriginPath());
    }
  });
  assertTrue("Expected at least " + NUM_FILES + " store files", storeFiles.size() >= NUM_FILES);
  LOG.info("Store files: " + storeFiles);
}
 
Developer: fengchen8086, Project: ditb, Lines: 41, Source: TestCorruptedRegionStoreFile.java

Example 10: testExcludeAllFromMinorCompaction

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * This test covers the scenario reported in HBASE-6901:
 * all files are bulk loaded and excluded from minor compaction.
 * Without the fix for HBASE-6901, an ArrayIndexOutOfBoundsException
 * is thrown.
 */
@Ignore ("Flakey: See HBASE-9051") @Test
public void testExcludeAllFromMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  util.setJobWithoutMRCluster();
  util.startMiniCluster();
  try (Connection conn = ConnectionFactory.createConnection();
      Admin admin = conn.getAdmin()) {
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    HTable table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
      FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
          Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // Generate two bulk load files
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);

    for (int i = 0; i < 2; i++) {
      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
      runIncrementalPELoad(conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME),
          testDir);
      // Perform the actual load
      new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
    }

    // Ensure data shows up
    int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 74, Source: TestHFileOutputFormat2.java

Example 11: testExcludeMinorCompaction

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testExcludeMinorCompaction() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hstore.compaction.min", 2);
  generateRandomStartKeys(5);
  util.setJobWithoutMRCluster();
  util.startMiniCluster();
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()){
    Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
    final FileSystem fs = util.getDFSCluster().getFileSystem();
    Table table = util.createTable(TABLE_NAME, FAMILIES);
    assertEquals("Should start with empty table", 0, util.countRows(table));

    // deep inspection: get the StoreFile dir
    final Path storePath = new Path(
      FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
        new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
          Bytes.toString(FAMILIES[0])));
    assertEquals(0, fs.listStatus(storePath).length);

    // put some data in it and flush to create a storefile
    Put p = new Put(Bytes.toBytes("test"));
    p.add(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
    table.put(p);
    admin.flush(TABLE_NAME);
    assertEquals(1, util.countRows(table));
    quickPoll(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

    // Generate a bulk load file with more rows
    conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude",
        true);

    RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME);
    runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir);

    // Perform the actual load
    new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);

    // Ensure data shows up
    int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
    assertEquals("LoadIncrementalHFiles should put expected data in table",
        expectedRows + 1, util.countRows(table));

    // should have a second StoreFile now
    assertEquals(2, fs.listStatus(storePath).length);

    // minor compactions shouldn't get rid of the file
    admin.compact(TABLE_NAME);
    try {
      quickPoll(new Callable<Boolean>() {
        @Override
        public Boolean call() throws Exception {
          return fs.listStatus(storePath).length == 1;
        }
      }, 5000);
      throw new IOException("SF# = " + fs.listStatus(storePath).length);
    } catch (AssertionError ae) {
      // this is expected behavior
    }

    // a major compaction should work though
    admin.majorCompact(TABLE_NAME);
    quickPoll(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return fs.listStatus(storePath).length == 1;
      }
    }, 5000);

  } finally {
    util.shutdownMiniCluster();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 80, Source: TestHFileOutputFormat2.java

Example 12: deleteFromFs

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
protected static void deleteFromFs(final MasterProcedureEnv env,
    final TableName tableName, final List<HRegionInfo> regions,
    final boolean archive) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final FileSystem fs = mfs.getFileSystem();
  final Path tempdir = mfs.getTempDir();

  final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);

  if (fs.exists(tableDir)) {
    // Ensure temp exists
    if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
      throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
    }

    // Ensure parent exists
    if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
      throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
    }

    // Move the table in /hbase/.tmp
    if (!fs.rename(tableDir, tempTableDir)) {
      if (fs.exists(tempTableDir)) {
        // TODO
        // what's in this dir? something old? probably something manual from the user...
        // let's get rid of this stuff...
        FileStatus[] files = fs.listStatus(tempdir);
        if (files != null && files.length > 0) {
          for (int i = 0; i < files.length; ++i) {
            if (!files[i].isDir()) continue;
            HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, files[i].getPath());
          }
        }
        fs.delete(tempdir, true);
      }
      throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
    }
  }

  // Archive regions from FS (temp directory)
  if (archive) {
    for (HRegionInfo hri : regions) {
      LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
      HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
          tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
    }
    LOG.debug("Table '" + tableName + "' archived!");
  }

  // Delete table directory from FS (temp directory)
  if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
    throw new IOException("Couldn't delete " + tempTableDir);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 56, Source: DeleteTableProcedure.java

Example 13: testCleaningRace

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
      TableName.valueOf("table")), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
  try {
    choreService.scheduleChore(cleaner);

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir,  String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded; the file is no longer in the original location
        // but it's in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver was unable to archive the file, probably due to the HBASE-7643
        // race condition. In this case the file should not be archived, and we should
        // still have the file in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid having this file in the next run
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.cancel(true);
    choreService.shutdown();
    fs.delete(rootDir, true);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 66, Source: TestHFileArchiving.java

Example 14: validateTableCreation

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public static void validateTableCreation(final HMaster master, final TableName tableName,
    final HRegionInfo[] regions, boolean hasFamilyDirs, String... family) throws IOException {
  // check filesystem
  final FileSystem fs = master.getMasterFileSystem().getFileSystem();
  final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName);
  assertTrue(fs.exists(tableDir));
  FSUtils.logFileSystemState(fs, tableDir, LOG);
  List<Path> allRegionDirs = FSUtils.getRegionDirs(fs, tableDir);
  for (int i = 0; i < regions.length; ++i) {
    Path regionDir = new Path(tableDir, regions[i].getEncodedName());
    assertTrue(regions[i] + " region dir does not exist", fs.exists(regionDir));
    assertTrue(allRegionDirs.remove(regionDir));
    List<Path> allFamilyDirs = FSUtils.getFamilyDirs(fs, regionDir);
    for (int j = 0; j < family.length; ++j) {
      final Path familyDir = new Path(regionDir, family[j]);
      if (hasFamilyDirs) {
        assertTrue(family[j] + " family dir does not exist", fs.exists(familyDir));
        assertTrue(allFamilyDirs.remove(familyDir));
      } else {
        // TODO: WARN: Modify Table/Families does not create a family dir
        if (!fs.exists(familyDir)) {
          LOG.warn(family[j] + " family dir does not exist");
        }
        allFamilyDirs.remove(familyDir);
      }
    }
    assertTrue("found extraneous families: " + allFamilyDirs, allFamilyDirs.isEmpty());
  }
  assertTrue("found extraneous regions: " + allRegionDirs, allRegionDirs.isEmpty());

  // check meta
  assertTrue(MetaTableAccessor.tableExists(master.getConnection(), tableName));
  assertEquals(regions.length, countMetaRegions(master, tableName));

  // check htd
  HTableDescriptor htd = master.getTableDescriptors().get(tableName);
  assertTrue("table descriptor not found", htd != null);
  for (int i = 0; i < family.length; ++i) {
    assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
  }
  assertEquals(family.length, htd.getFamilies().size());
}
 
Developer: fengchen8086, Project: ditb, Lines: 43, Source: MasterProcedureTestingUtility.java

Example 15: testOpenFailedUnrecoverable

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Tests a region open failure that is not recoverable.
 */
@Test (timeout=60000)
public void testOpenFailedUnrecoverable() throws Exception {
  TableName table =
      TableName.valueOf("testOpenFailedUnrecoverable");
  try {
    HTableDescriptor desc = new HTableDescriptor(table);
    desc.addFamily(new HColumnDescriptor(FAMILY));
    admin.createTable(desc);

    Table meta = new HTable(conf, TableName.META_TABLE_NAME);
    HRegionInfo hri = new HRegionInfo(
      desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri);

    FileSystem fs = FileSystem.get(conf);
    Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table);
    Path regionDir = new Path(tableDir, hri.getEncodedName());
    // create a file named the same as the region dir to
    // mess up with region opening
    fs.create(regionDir, true);

    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    master.assignRegion(hri);
    AssignmentManager am = master.getAssignmentManager();
    assertFalse(am.waitForAssignment(hri));

    RegionState state = am.getRegionStates().getRegionState(hri);
    assertEquals(RegionState.State.FAILED_OPEN, state.getState());
    // Failed to open due to file system issue. Region state should
    // carry the opening region server so that we can force close it
    // later on before opening it again. See HBASE-9092.
    assertNotNull(state.getServerName());

    // remove the blocking file, so that region can be opened
    fs.delete(regionDir, true);
    master.assignRegion(hri);
    assertTrue(am.waitForAssignment(hri));

    ServerName serverName = master.getAssignmentManager().
      getRegionStates().getRegionServerOfRegion(hri);
    TEST_UTIL.assertRegionOnServer(hri, serverName, 6000);
  } finally {
    TEST_UTIL.deleteTable(table);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 49, Source: TestAssignmentManagerOnCluster.java


Note: The org.apache.hadoop.hbase.util.FSUtils.getTableDir method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers, and copyright belongs to the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.