

Java FSUtils.delete Method Code Examples

This article collects typical usage examples of the org.apache.hadoop.hbase.util.FSUtils.delete method in Java. If you are wondering what FSUtils.delete does, how to use it, or how it looks in real code, the selected examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.FSUtils.


Four code examples of the FSUtils.delete method are shown below, sorted by popularity.
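Before diving into the examples, here is a minimal, self-contained sketch of how the method is typically invoked. This is an illustration based on the call sites below (the path and configuration are hypothetical assumptions, not taken from any of the projects):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

public class FSUtilsDeleteSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/tmp/hbase-example"); // hypothetical path
    // recursive = true removes a directory together with its contents;
    // pass false when deleting a single file
    FSUtils.delete(fs, dir, true);
  }
}

As the examples show, the method takes a FileSystem, a Path, and a recursive flag, mirroring Hadoop's FileSystem.delete.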

Example 1: tearDown

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
@After
public void tearDown() throws Exception {
  try {
    FileSystem fs = UTIL.getTestFileSystem();
    // cleanup each of the files/directories registered
    for (Path file : toCleanup) {
      // remove the table and archive directories
      FSUtils.delete(fs, file, true);
    }
  } catch (IOException e) {
    LOG.warn("Failure to delete archive directory", e);
  } finally {
    toCleanup.clear();
  }
  // make sure that backups are off for all tables
  archivingClient.disableHFileBackup();
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TestZooKeeperTableArchiveClient.java
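In this teardown, each registered path is deleted recursively (the true flag), a failed delete is merely logged, and the finally block clears the cleanup list so that backups are disabled afterwards regardless of the outcome.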

Example 2: removeTableInfoInPre96Format

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
/**
 * Removes .tableinfo files that are laid out in the pre-96 format (i.e., tableinfo files
 * placed directly under the table directory).
 * @param tableName the table whose legacy .tableinfo files should be removed
 * @throws IOException if listing or deleting the files fails
 */
private void removeTableInfoInPre96Format(TableName tableName) throws IOException {
  Path tableDir = FSUtils.getTableDir(rootDir, tableName);
  FileStatus[] status = FSUtils.listStatus(fs, tableDir, TABLEINFO_PATHFILTER);
  if (status == null) return;
  for (FileStatus fStatus : status) {
    FSUtils.delete(fs, fStatus.getPath(), false);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: NamespaceUpgrade.java
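Two details are worth noting here: FSUtils.listStatus returns null rather than an empty array when nothing matches, hence the null check, and the recursive flag is false because each matched .tableinfo entry is a plain file rather than a directory.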

Example 3: writeRegionInfoOnFilesystem

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
/**
 * Write out an info file under the region directory. Useful when recovering mangled regions.
 *
 * @param regionInfoContent serialized version of the {@link HRegionInfo}
 * @param useTempDir        whether to use the region's .tmp dir for safer file creation.
 */
private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final boolean useTempDir)
    throws IOException {
  Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE);
  if (useTempDir) {
    // Create in tmpDir and then move into place in case we crash after
    // create but before close. If we don't successfully close the file,
    // subsequent region reopens will fail below because the create is
    // still registered with the NameNode.

    // Compose the path of the temporary file
    Path tmpPath = new Path(getTempDir(), REGION_INFO_FILE);

    // If a datanode crashes, or if the RS goes down just before close is
    // called on the regioninfo file in the .tmp directory, the next
    // creation attempt will get an AlreadyCreatedException.
    // Hence delete the file if it exists, then recreate it.
    if (FSUtils.isExists(fs, tmpPath)) {
      FSUtils.delete(fs, tmpPath, true);
    }

    // Write HRI to a file in case we need to recover hbase:meta
    writeRegionInfoFileContent(conf, fs, tmpPath, regionInfoContent);

    // Move the created file to the original path
    if (fs.exists(tmpPath) && !rename(tmpPath, regionInfoFile)) {
      throw new IOException("Unable to rename " + tmpPath + " to " + regionInfoFile);
    }
  } else {
    // Write HRI to a file in case we need to recover hbase:meta
    writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: HRegionFileSystem.java
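The same create-in-tmp-then-rename pattern can be expressed with the plain Hadoop FileSystem API. The sketch below is a simplified illustration under stated assumptions (the class, method, and parameter names are hypothetical), not the HBase implementation:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRenameSketch {
  /** Hypothetical helper: write content to tmpPath, then move it into finalPath. */
  static void writeAtomically(FileSystem fs, Path tmpPath, Path finalPath, byte[] content)
      throws IOException {
    // A stale tmp file left by an earlier crash would get in the way, so remove it first
    if (fs.exists(tmpPath)) {
      fs.delete(tmpPath, true);
    }
    try (FSDataOutputStream out = fs.create(tmpPath)) {
      out.write(content);
    }
    // rename is atomic on HDFS, so readers never observe a half-written file
    if (!fs.rename(tmpPath, finalPath)) {
      throw new IOException("Unable to rename " + tmpPath + " to " + finalPath);
    }
  }
}

The key design choice, as in the HBase code above, is that the final path only ever receives a fully written and closed file.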

Example 4: testArchiveOldRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
@Test
public void testArchiveOldRegion() throws Exception {
  String table = "table";
  HBaseTestingUtility htu = new HBaseTestingUtility();
  setRootDirAndCleanIt(htu, "testCleanParent");
  Server server = new MockServer(htu);
  MasterServices services = new MockMasterServices(server);

  // create the janitor
  CatalogJanitor janitor = new CatalogJanitor(server, services);

  // Create regions.
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table));
  htd.addFamily(new HColumnDescriptor("f"));
  HRegionInfo parent = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("eee"));
  HRegionInfo splita = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("aaa"), Bytes.toBytes("ccc"));
  HRegionInfo splitb = new HRegionInfo(htd.getTableName(),
      Bytes.toBytes("ccc"),
      Bytes.toBytes("eee"));

  // Test that when both daughter regions are in place, we do not
  // remove the parent.
  Result parentMetaRow = createResult(parent, splita, splitb);
  FileSystem fs = FileSystem.get(htu.getConfiguration());
  Path rootdir = services.getMasterFileSystem().getRootDir();
  // have to set the root directory since we use it in HFileDisposer to figure out how to get to
  // the archive directory. Otherwise, it just seems to pick the first root directory it can find
  // (so the single test passes, but when the full suite is run, things break).
  FSUtils.setRootDir(fs.getConf(), rootdir);
  Path tabledir = FSUtils.getTableDir(rootdir, htd.getTableName());
  Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
  Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
    tabledir, htd.getColumnFamilies()[0].getName());
  LOG.debug("Table dir:" + tabledir);
  LOG.debug("Store dir:" + storedir);
  LOG.debug("Store archive dir:" + storeArchive);

  // add a couple of store files that we can check for
  FileStatus[] mockFiles = addMockStoreFiles(2, services, storedir);
  // get the current store files for comparison
  FileStatus[] storeFiles = fs.listStatus(storedir);
  int index = 0;
  for (FileStatus file : storeFiles) {
    LOG.debug("Have store file:" + file.getPath());
    assertEquals("Got unexpected store file", mockFiles[index].getPath(),
      storeFiles[index].getPath());
    index++;
  }

  // do the cleaning of the parent
  assertTrue(janitor.cleanParent(parent, parentMetaRow));
  LOG.debug("Finished cleanup of parent region");

  // and now check to make sure that the files have actually been archived
  FileStatus[] archivedStoreFiles = fs.listStatus(storeArchive);
  logFiles("original files", storeFiles);
  logFiles("archived files", archivedStoreFiles);

  assertArchiveEqualToOriginal(storeFiles, archivedStoreFiles, fs);

  // cleanup
  FSUtils.delete(fs, rootdir, true);
  services.stop("Test finished");
  server.stop("Test finished");
  janitor.cancel(true);
}
 
Developer: fengchen8086, Project: ditb, Lines: 69, Source: TestCatalogJanitor.java
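Note the cleanup order at the end of this test: the whole test root directory is removed with a recursive FSUtils.delete before the mock services, the server, and the janitor are shut down.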


Note: The org.apache.hadoop.hbase.util.FSUtils.delete method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.