

Java HBaseFileSystem.makeDirOnFileSystem Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseFileSystem.makeDirOnFileSystem. If you have been wondering what HBaseFileSystem.makeDirOnFileSystem does, or how to use it in practice, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseFileSystem.


Fifteen code examples of HBaseFileSystem.makeDirOnFileSystem are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
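
Before walking through the examples, here is a minimal usage sketch of the method itself. Note that makeDirOnFileSystem(fs, path) returns a boolean rather than throwing on failure, so callers are expected to check the return value. The path below is purely hypothetical, and the sketch assumes an HBase 0.94.x-era classpath on which org.apache.hadoop.hbase.HBaseFileSystem is available.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseFileSystem;

public class MakeDirOnFileSystemDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/hbase/.example-dir"); // hypothetical path

    // The guard pattern used throughout the examples below: create the
    // directory only if it is missing, and treat a 'false' return as failure.
    if (!fs.exists(dir) && !HBaseFileSystem.makeDirOnFileSystem(fs, dir)) {
      throw new IOException("Failed to create directory: " + dir);
    }
  }
}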

Example 1: createInitialFileSystemLayout

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Create initial layout in filesystem.
 * <ol>
 * <li>Check if the root region exists and is readable; if not, create it.
 * Create hbase.version and the -ROOT- directory if they do not exist.
 * </li>
 * <li>Create a log archive directory for RS to put archived logs</li>
 * </ol>
 * Idempotent.
 */
private Path createInitialFileSystemLayout() throws IOException {
  // check if the root directory exists
  checkRootDir(this.rootdir, conf, this.fs);

  // check if temp directory exists and clean it
  checkTempDir(this.tempdir, conf, this.fs);

  Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);

  // Make sure the region servers can archive their old logs
  if(!this.fs.exists(oldLogDir)) {
    HBaseFileSystem.makeDirOnFileSystem(fs, oldLogDir);
  }

  return oldLogDir;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 27, Source: MasterFileSystem.java

Example 2: checkTempDir

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Make sure the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once just after the master becomes the active one.
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
  // If the temp directory exists, clear the content (left over, from the previous run)
  if (fs.exists(tmpdir)) {
    // Archive table in temp, maybe left over from failed deletion,
    // if not the cleaner will take care of them.
    for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
      }
    }
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tmpdir)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }

  // Create the temp directory
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, tmpdir)) {
    throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 26, Source: MasterFileSystem.java
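
Where the HBaseFileSystem wrapper is not available, the clear-and-recreate pattern above can be approximated with the stock Hadoop FileSystem API. This is only a sketch under that assumption: unlike checkTempDir, it does not archive leftover regions before deleting, and it lacks whatever retry behavior the wrapper adds.

// Sketch: reset a temp directory using only the plain Hadoop FileSystem API.
static void resetTempDir(FileSystem fs, Path tmpdir) throws IOException {
  // Recursively delete any content left over from a previous run.
  if (fs.exists(tmpdir) && !fs.delete(tmpdir, true)) {
    throw new IOException("Unable to clean the temp directory: " + tmpdir);
  }
  // Recreate the (now empty) temp directory.
  if (!fs.mkdirs(tmpdir)) {
    throw new IOException("Temp directory '" + tmpdir + "' creation failure.");
  }
}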

Example 3: archiveStoreFile

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Archive the store file
 * @param fs the filesystem where the store files live
 * @param regionInfo region hosting the store files
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
 * @param family the family hosting the store files
 * @param storeFile file to be archived
 * @throws IOException if the files could not be correctly disposed.
 */
public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo, Configuration conf,
    Path tableDir, byte[] family, Path storeFile) throws IOException {
  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
  // make sure we don't archive if we can't and that the archive dir exists
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, storeArchiveDir)) {
    throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
        + Bytes.toString(family) + ", deleting compacted files instead.");
  }

  // do the actual archive
  long start = EnvironmentEdgeManager.currentTimeMillis();
  File file = new FileablePath(fs, storeFile);
  if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
    throw new IOException("Failed to archive/delete the file for region:"
        + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) + " into "
        + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 29, Source: HFileArchiver.java

Example 4: archiveStoreFile

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Archive the store file
 * @param fs the filesystem where the store files live
 * @param regionInfo region hosting the store files
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
 * @param family the family hosting the store files
 * @param storeFile file to be archived
 * @throws IOException if the files could not be correctly disposed.
 */
public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
    Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
  // make sure we don't archive if we can't and that the archive dir exists
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, storeArchiveDir)) {
    throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
        + Bytes.toString(family) + ", deleting compacted files instead.");
  }

  // do the actual archive
  long start = EnvironmentEdgeManager.currentTimeMillis();
  File file = new FileablePath(fs, storeFile);
  if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
    throw new IOException("Failed to archive/delete the file for region:"
        + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
        + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
 
Developer: wanhao, Project: IRIndex, Lines: 29, Source: HFileArchiver.java

Example 5: moveToTemp

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Move the specified file/directory to the hbase temp directory.
 * @param path The path of the file/directory to move
 * @return The temp location of the file/directory moved
 * @throws IOException in case of file-system failure
 */
public Path moveToTemp(final Path path) throws IOException {
  Path tempPath = new Path(this.tempdir, path.getName());

  // Ensure temp exists
  if (!fs.exists(tempdir) && !HBaseFileSystem.makeDirOnFileSystem(fs, tempdir)) {
    throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
  }

  if (!HBaseFileSystem.renameDirForFileSystem(fs, path, tempPath)) {
    throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
  }

  return tempPath;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 21, Source: MasterFileSystem.java

Example 6: build

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Create a store file writer. Client is responsible for closing file when done. If metadata,
 * add BEFORE closing using {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) {
    throw new IllegalArgumentException("Either specify parent directory " + "or file path");
  }

  if (dir == null) {
    dir = filePath.getParent();
  }

  if (!fs.exists(dir)) {
    HBaseFileSystem.makeDirOnFileSystem(fs, dir);
  }

  if (filePath == null) {
    filePath = getUniqueFile(fs, dir);
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  if (compressAlgo == null) {
    compressAlgo = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  if (comparator == null) {
    comparator = KeyValue.COMPARATOR;
  }
  return new Writer(fs, filePath, blockSize, compressAlgo, dataBlockEncoder, conf, cacheConf,
      comparator, bloomType, maxKeyCount, checksumType, bytesPerChecksum, includeMVCCReadpoint);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 34, Source: StoreFile.java
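
For context, a caller of this build() method might look roughly like the following. This is a hedged sketch: it assumes the 0.94-era StoreFile.WriterBuilder constructor and the withOutputDir/withMaxKeyCount setters, whose exact signatures may differ between releases, and every path and count here is illustrative.

// Hypothetical builder usage; verify the exact WriterBuilder API for your release.
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs, blocksize)
    .withOutputDir(new Path("/hbase/mytable/1234abcd/.tmp")) // dir OR file path, never both
    .withMaxKeyCount(estimatedKeyCount)
    .build(); // build() creates a missing parent dir via makeDirOnFileSystem
try {
  // append KeyValues here; add any metadata BEFORE closing, per the Javadoc above
} finally {
  writer.close();
}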

Example 7: convertRegionEditsToTemp

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
Path convertRegionEditsToTemp(Path rootdir, Path edits, String tmpname) {
  List<String> components = new ArrayList<String>(10);
  do {
    components.add(edits.getName());
    edits = edits.getParent();
  } while (edits.depth() > rootdir.depth());
  Path ret = ZKSplitLog.getSplitLogDir(rootdir, tmpname);
  for (int i = components.size() - 1; i >= 0; i--) {
    ret = new Path(ret, components.get(i));
  }
  try {
    if (fs.exists(ret)) {
      LOG.warn("Found existing old temporary edits file. It could be the "
          + "result of a previous failed split attempt. Deleting "
          + ret + ", length="
          + fs.getFileStatus(ret).getLen());
      if (!HBaseFileSystem.deleteFileFromFileSystem(fs, ret)) {
        LOG.warn("Failed delete of old " + ret);
      }
    }
    Path dir = ret.getParent();
    if (!fs.exists(dir) && !HBaseFileSystem.makeDirOnFileSystem(fs, dir)) {
      LOG.warn("mkdir failed on " + dir);
    }
  } catch (IOException e) {
    LOG.warn("Could not prepare temp staging area ", e);
    // ignore, exceptions will be thrown elsewhere
  }
  return ret;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 31, Source: HLogSplitter.java
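
To make the path arithmetic concrete, here is a hedged walk-through in comment form; the concrete paths and the split-log staging directory name are illustrative, since they depend on ZKSplitLog.getSplitLogDir and the cluster configuration.

// Suppose rootdir = /hbase, tmpname = "splitlog-task-1" (hypothetical), and
// edits   = /hbase/mytable/region1/recovered.edits/0000042.
// The do/while loop collects ["0000042", "recovered.edits", "region1", "mytable"],
// walking up parents until edits.depth() == rootdir.depth(). The for loop then
// replays the components in reverse under the split-log staging dir, e.g.:
// /hbase/splitlog/splitlog-task-1/mytable/region1/recovered.edits/0000042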

Example 8: createSplitDir

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * @param fs Filesystem to use
 * @param splitdir Directory to store temporary split data in
 * @throws IOException If we fail to delete an existing <code>splitdir</code>
 * or fail to create it.
 * @see #cleanupSplitDir(FileSystem, Path)
 */
void createSplitDir(final FileSystem fs, final Path splitdir)
throws IOException {
  if (fs.exists(splitdir)) {
    LOG.info("The " + splitdir
        + " directory exists.  Hence deleting it to recreate it");
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, splitdir)) {
      throw new IOException("Failed deletion of " + splitdir
          + " before creating them again.");
    }
  }
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, splitdir)) {
    throw new IOException("Failed create of " + splitdir);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 21, Source: SplitTransaction.java

Example 9: mWinterLCCCompleteCompactionHDFS

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
private void mWinterLCCCompleteCompactionHDFS(final Collection<StoreFile> compactedFiles,
    final StoreFile.Writer compactedFile) throws IOException {
  // lccIndexOrigPath = /hbase/lcc/xxx/.tmp/aaa.lccindex
  Path lccIndexDirPath = mWinterGetLCCIndexFilePathFromHFilePathInTmp(compactedFile.getPath());
  FileStatus[] fileStatusArray = fs.listStatus(lccIndexDirPath);
  if (fileStatusArray == null || fileStatusArray.length == 0) {
    return;
  }
  for (FileStatus fileStatus : fileStatusArray) {
    // fileStatus = /hbase/lcc/xxx/.tmp/aaa.lccindex/qualifier
    // System.out.println("winter checking lccIndexRawPath: " + fileStatus.getPath());
    // lccIndexDir = /hbase/lcc/AAA/.lccindex + Q1-Q4 + BBB
    Path lccIndexDstPath =
        new Path(new Path(lccIndexDir, fileStatus.getPath().getName()), compactedFile.getPath()
            .getName());
    // System.out.println("winter checking lccIndexDstPath: " + lccIndexDstPath);
    if (!fs.exists(lccIndexDstPath.getParent())) {
      // System.out.println("winter lccindex dir path not exists, create first: "
      // + lccIndexDstPath.getParent());
      HBaseFileSystem.makeDirOnFileSystem(fs, lccIndexDstPath.getParent());
    }
    // System.out.println("winter renaming compacted lcc index file at " + fileStatus.getPath()
    // + " to " + lccIndexDstPath);
    LOG.info("Renaming compacted index file at " + fileStatus.getPath() + " to "
        + lccIndexDstPath);
    if (!HBaseFileSystem.renameDirForFileSystem(fs, fileStatus.getPath(), lccIndexDstPath)) {
      LOG.error("Failed move of compacted index file " + fileStatus.getPath() + " to "
          + lccIndexDstPath);
      WinterOptimizer.NeedImplementation("Failed move of compacted index file "
          + fileStatus.getPath() + " to " + lccIndexDstPath);
    }
  }
  fileStatusArray = fs.listStatus(lccIndexDirPath);
  if (fileStatusArray != null && fileStatusArray.length == 0) {
    HFileArchiver.mWinterArchiveFile(conf, fs, this.region, family.getName(), lccIndexDirPath);
  } else {
    WinterOptimizer.ThrowWhenCalled("winter completeCompaction lcc dir should be empty but not: "
        + lccIndexDirPath);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 41, Source: Store.java

Example 10: build

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Create a store file writer. Client is responsible for closing file when
 * done. If metadata, add BEFORE closing using
 * {@link Writer#appendMetadata}.
 */
public Writer build() throws IOException {
  if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) {
    throw new IllegalArgumentException("Either specify parent directory " +
        "or file path");
  }

  if (dir == null) {
    dir = filePath.getParent();
  }

  if (!fs.exists(dir)) {
    HBaseFileSystem.makeDirOnFileSystem(fs, dir);
  }

  if (filePath == null) {
    filePath = getUniqueFile(fs, dir);
    if (!BloomFilterFactory.isGeneralBloomEnabled(conf)) {
      bloomType = BloomType.NONE;
    }
  }

  if (compressAlgo == null) {
    compressAlgo = HFile.DEFAULT_COMPRESSION_ALGORITHM;
  }
  if (comparator == null) {
    comparator = KeyValue.COMPARATOR;
  }
  return new Writer(fs, filePath, blockSize, compressAlgo, dataBlockEncoder,
      conf, cacheConf, comparator, bloomType, maxKeyCount, checksumType,
      bytesPerChecksum, includeMVCCReadpoint);
}
 
Developer: wanhao, Project: IRIndex, Lines: 37, Source: StoreFile.java

Example 11: createHRegion

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Convenience method creating new HRegions. Used by createTable.
 * The {@link HLog} for the created region needs to be closed
 * explicitly, if it is not null.
 * Use {@link HRegion#getLog()} to get access.
 *
 * @param info Info for region to create.
 * @param rootDir Root directory for HBase instance
 * @param conf the {@link Configuration} to use
 * @param hTableDescriptor descriptor of the table the region belongs to
 * @param hlog shared HLog
 * @param initialize true to initialize the region
 * @param ignoreHLog true to skip creating a new hlog when the passed hlog
 *   is null, mostly for createTable
 * @return new HRegion
 *
 * @throws IOException
 */
public static HRegion createHRegion(final HRegionInfo info, final Path rootDir,
                                    final Configuration conf,
                                    final HTableDescriptor hTableDescriptor,
                                    final HLog hlog,
                                    final boolean initialize, final boolean ignoreHLog)
    throws IOException {
  LOG.info("creating HRegion " + info.getTableNameAsString()
      + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
      " Table name == " + info.getTableNameAsString());

  Path tableDir =
      HTableDescriptor.getTableDir(rootDir, info.getTableName());
  Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName());
  FileSystem fs = FileSystem.get(conf);
  HBaseFileSystem.makeDirOnFileSystem(fs, regionDir);
  // Write HRI to a file in case we need to recover .META.
  writeRegioninfoOnFilesystem(info, regionDir, fs, conf);
  HLog effectiveHLog = hlog;
  if (hlog == null && !ignoreHLog) {
    effectiveHLog = new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME),
        new Path(regionDir, HConstants.HREGION_OLDLOGDIR_NAME), conf);
  }
  HRegion region = HRegion.newHRegion(tableDir,
      effectiveHLog, fs, conf, info, hTableDescriptor, null);
  if (initialize) {
    region.initialize();
  }
  return region;
}
 
Developer: wanhao, Project: IRIndex, Lines: 48, Source: HRegion.java

Example 12: createStoreHomeDir

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Create this store's home directory.
 * @param fs filesystem to create the directory on
 * @param homedir the store's home directory
 * @return <code>homedir</code>
 * @throws IOException
 */
Path createStoreHomeDir(final FileSystem fs,
    final Path homedir) throws IOException {
  if (!fs.exists(homedir) && !HBaseFileSystem.makeDirOnFileSystem(fs, homedir)) {
    throw new IOException("Failed create of: " + homedir.toString());
  }
  return homedir;
}
 
Developer: wanhao, Project: IRIndex, Lines: 15, Source: Store.java

Example 13: checkRootDir

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Get the rootdir. Make sure it is wholesome and exists before returning.
 * @param rd the root directory path
 * @param c the configuration to use
 * @param fs the filesystem to check
 * @return hbase.rootdir (after checks for existence and bootstrapping if
 * needed populating the directory with necessary bootup files).
 * @throws IOException
 */
private Path checkRootDir(final Path rd, final Configuration c,
  final FileSystem fs)
throws IOException {
  // If FS is in safe mode wait till out of it.
  FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
      10 * 1000));
  // Filesystem is good. Go ahead and check for hbase.rootdir.
  try {
    if (!fs.exists(rd)) {
      HBaseFileSystem.makeDirOnFileSystem(fs, rd);
      // DFS leaves safe mode with 0 DNs when there are 0 blocks.
      // We used to handle this by checking the current DN count and waiting until
      // it is nonzero. With security, the check for datanode count doesn't work --
      // it is a privileged op. So instead we adopt the strategy of the jobtracker
      // and simply retry file creation during bootstrap indefinitely. As soon as
      // there is one datanode it will succeed. Permission problems should have
      // already been caught by mkdirs above.
      FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
          c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
              HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    } else {
      if (!fs.isDirectory(rd)) {
        throw new IllegalArgumentException(rd.toString() + " is not a directory");
      }
      // as above
      FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000),
          c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
              HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    }
  } catch (IllegalArgumentException iae) {
    LOG.fatal("Please fix invalid configuration for "
      + HConstants.HBASE_DIR + " " + rd.toString(), iae);
    throw iae;
  }
  // Make sure cluster ID exists
  if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
      HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
    FSUtils.setClusterId(fs, rd, UUID.randomUUID().toString(), c.getInt(
        HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  }
  clusterId = FSUtils.getClusterId(fs, rd);

  // Make sure the root region directory exists!
  if (!FSUtils.rootRegionExists(fs, rd)) {
    bootstrap(rd, c);
  }
  createRootTableInfo(rd);
  return rd;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 59, Source: MasterFileSystem.java

Example 14: archiveStoreFiles

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
/**
 * Remove the store files, either by archiving them or outright deletion
 * @param conf {@link Configuration} to examine to determine the archive directory
 * @param fs the filesystem where the store files live
 * @param parent Parent region hosting the store files
 * @param family the family hosting the store files
 * @param compactedFiles files to be disposed of. No further reading of these files should be
 *          attempted; otherwise likely to cause an {@link IOException}
 * @throws IOException if the files could not be correctly disposed.
 */
public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegion parent,
    byte[] family, Collection<StoreFile> compactedFiles) throws IOException {

  // sometimes in testing, we don't have rss, so we need to check for that
  if (fs == null) {
    LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:"
        + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family));
    deleteStoreFilesWithoutArchiving(compactedFiles);
    return;
  }

  // short circuit if we don't have any files to delete
  if (compactedFiles.size() == 0) {
    LOG.debug("No store files to dispose, done!");
    return;
  }

  // build the archive path
  if (parent == null || family == null) {
    throw new IOException("Need to have a parent region and a family to archive from.");
  }

  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);

  // make sure we don't archive if we can't and that the archive dir exists
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, storeArchiveDir)) {
    throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
        + Bytes.toString(family) + ", deleting compacted files instead.");
  }

  // otherwise we attempt to archive the store files
  LOG.debug("Archiving compacted store files.");

  // wrap the storefile into a File
  StoreToFile getStorePath = new StoreToFile(fs);
  Collection<File> storeFiles = Collections2.transform(compactedFiles, getStorePath);

  // add index files if exist
  FileableIndexPath indexPath = new FileableIndexPath(fs, compactedFiles);
  if (!indexPath.getChildren().isEmpty()) {
    storeFiles = new ArrayList<File>(storeFiles);
    storeFiles.add(indexPath);
  }

  FileableLCCIndexPath lccIndexPath = new FileableLCCIndexPath(fs, compactedFiles);
  if (!lccIndexPath.getChildren().isEmpty()) {
    storeFiles = new ArrayList<File>(storeFiles);
    storeFiles.add(lccIndexPath);
  }

  // WinterOptimizer.WaitForOptimizing("winter what to do about Archive?");

  // do the actual archive
  if (!resolveAndArchive(fs, storeArchiveDir, storeFiles)) {
    throw new IOException("Failed to archive/delete all the files for region:"
        + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
        + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 69, Source: HFileArchiver.java

Example 15: mWinterArchiveFile

import org.apache.hadoop.hbase.HBaseFileSystem; // import the package/class this method depends on
public static void mWinterArchiveFile(Configuration conf, FileSystem fs, HRegion parent,
    byte[] family, Path path) throws IOException {

  // sometimes in testing, we don't have rss, so we need to check for that
  if (fs == null) {
    WinterOptimizer
        .ThrowWhenCalled("mWinterArchiveFile Passed filesystem is null, so just deleting the files without archiving for region:"
            + Bytes.toString(parent.getRegionName())
            + ", family:"
            + Bytes.toString(family)
            + " path: " + path.toString());
    return;
  }

  // short circuit if we don't have any files to delete
  if (path == null) {
    LOG.debug("Winter mWinterArchiveFile path is null, return!");
    return;
  }

  // build the archive path
  if (parent == null || family == null) {
    throw new IOException("Need to have a parent region and a family to archive from.");
  }

  Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);

  // make sure we don't archive if we can't and that the archive dir exists
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, storeArchiveDir)) {
    throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
        + Bytes.toString(family) + ", deleting compacted files instead.");
  }

  // otherwise we attempt to archive the store files
  LOG.debug("Archiving compacted store files.");

  // wrap the storefile into a File

  Collection<File> storeFiles = new ArrayList<HFileArchiver.File>();
  storeFiles.add(new FileableLCCIndexDirFile(fs, path));
 
  // do the actual archive
  if (!resolveAndArchive(fs, storeArchiveDir, storeFiles)) {
    throw new IOException("Failed to archive/delete all the files for region:"
        + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
        + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 48, Source: HFileArchiver.java


Note: The org.apache.hadoop.hbase.HBaseFileSystem.makeDirOnFileSystem method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors. Consult each project's license before redistributing or using the code, and do not reproduce this article without permission.