

Java HFileArchiver.archiveRegion method code examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.backup.HFileArchiver.archiveRegion. If you are wondering what HFileArchiver.archiveRegion does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.backup.HFileArchiver.


A total of 15 code examples of the HFileArchiver.archiveRegion method are shown below, sorted by popularity by default.
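
Before diving into the individual examples, here is a minimal sketch, not taken from any of the projects below, that shows the two archiveRegion overloads used throughout this page: one that resolves the region's directories from a Configuration and an HRegionInfo, and one that takes the root, table, and region directories explicitly. The class name ArchiveRegionSketch, the method archive, and the variable names are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.util.FSUtils;

public class ArchiveRegionSketch {
  /**
   * Archives the HFiles of the given region using both overloads shown in the examples.
   * The caller supplies the cluster Configuration and the HRegionInfo of the region.
   */
  static void archive(Configuration conf, HRegionInfo regionInfo) throws IOException {
    FileSystem fs = FileSystem.get(conf);

    // Overload 1: the archiver resolves the table/region directories from conf and the region info.
    HFileArchiver.archiveRegion(conf, fs, regionInfo);

    // Overload 2: the caller passes the root, table, and region directories explicitly,
    // which is useful when the region directory has already been moved (e.g. to a temp dir).
    Path rootDir = FSUtils.getRootDir(conf);
    Path tableDir = FSUtils.getTableDir(rootDir, regionInfo.getTable());
    Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
    HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);
  }
}

In the examples that follow, the Configuration-based overload appears where a live region is being cleaned up (CatalogJanitor, RestoreSnapshotHelper), while the path-based overload appears where the region directory already sits outside the normal layout, such as under the master's temp directory (MasterFileSystem, DeleteTableHandler).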

Example 1: cleanMergeRegion

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * If merged region no longer holds reference to the merge regions, archive
 * merge region on hdfs and perform deleting references in hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we delete references in merged region on hbase:meta and archive
 *         the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
      mergedRegion);
    return true;
  }
  return false;
}
 
Author: fengchen8086, Project: ditb, Lines of code: 36, Source file: CatalogJanitor.java

Example 2: checkTempDir

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Make sure the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once just after the master becomes the active one.
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
  // If the temp directory exists, clear the content (left over, from the previous run)
  if (fs.exists(tmpdir)) {
    // Archive table in temp, maybe left over from failed deletion,
    // if not the cleaner will take care of them.
    for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
      }
    }
    if (!fs.delete(tmpdir, true)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }

  // Create the temp directory
  if (!fs.mkdirs(tmpdir)) {
    throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
  }
}
 
Author: fengchen8086, Project: ditb, Lines of code: 26, Source file: MasterFileSystem.java

Example 3: deleteRegionFromFileSystem

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Remove the region from the table directory, archiving the region's hfiles.
 *
 * @param conf       the {@link Configuration} to use
 * @param fs         {@link FileSystem} from which to remove the region
 * @param tableDir   {@link Path} to where the table is being stored
 * @param regionInfo {@link HRegionInfo} for region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf, final FileSystem fs,
    final Path tableDir, final HRegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();

  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
    return;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }

  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
 
Author: fengchen8086, Project: ditb, Lines of code: 33, Source file: HRegionFileSystem.java

Example 4: checkTempDir

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Make sure the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once just after the master becomes the active one.
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
  // If the temp directory exists, clear the content (left over, from the previous run)
  if (fs.exists(tmpdir)) {
    // Archive table in temp, maybe left over from failed deletion,
    // if not the cleaner will take care of them.
    for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
      }
    }
    if (!HBaseFileSystem.deleteDirFromFileSystem(fs, tmpdir)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }

  // Create the temp directory
  if (!HBaseFileSystem.makeDirOnFileSystem(fs, tmpdir)) {
    throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 26, Source file: MasterFileSystem.java

Example 5: deleteRegionFromFileSystem

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Remove the region from the table directory, archiving the region's hfiles.
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} from which to remove the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link HRegionInfo} for region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf,
    final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();

  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
    return;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }

  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
 
Author: grokcoder, Project: pbase, Lines of code: 32, Source file: HRegionFileSystem.java

Example 6: cleanMergeRegion

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * If merged region no longer holds reference to the merge regions, archive
 * merge region on hdfs and perform deleting references in hbase:meta
 * @param mergedRegion
 * @param regionA
 * @param regionB
 * @return true if we delete references in merged region on hbase:meta and archive
 *         the files on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion);
    return true;
  }
  return false;
}
 
Author: tenggyut, Project: HIndex, Lines of code: 35, Source file: CatalogJanitor.java

Example 7: deleteRegionFromFileSystem

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Remove the region from the table directory, archiving the region's hfiles.
 * @param conf the {@link Configuration} to use
 * @param fs {@link FileSystem} from which to remove the region
 * @param tableDir {@link Path} to where the table is being stored
 * @param regionInfo {@link RegionInfo} for region to be deleted
 * @throws IOException if the request cannot be completed
 */
public static void deleteRegionFromFileSystem(final Configuration conf,
    final FileSystem fs, final Path tableDir, final RegionInfo regionInfo) throws IOException {
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path regionDir = regionFs.getRegionDir();

  if (!fs.exists(regionDir)) {
    LOG.warn("Trying to delete a region that do not exists on disk: " + regionDir);
    return;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("DELETING region " + regionDir);
  }

  // Archive region
  Path rootDir = FSUtils.getRootDir(conf);
  HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir);

  // Delete empty region dir
  if (!fs.delete(regionDir, true)) {
    LOG.warn("Failed delete of " + regionDir);
  }
}
 
Author: apache, Project: hbase, Lines of code: 32, Source file: HRegionFileSystem.java

Example 8: cleanParent

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * If daughters no longer hold reference to the parents, delete the parent.
 * @param parent HRegionInfo of split offlined parent
 * @param rowContent Content of <code>parent</code> row in
 * <code>metaRegionName</code>
 * @return True if we removed <code>parent</code> from meta table and from
 * the filesystem.
 * @throws IOException
 */
boolean cleanParent(final HRegionInfo parent, Result rowContent)
throws IOException {
  boolean result = false;
  // Check whether it is a merged region and not clean reference
  // No necessary to check MERGEB_QUALIFIER because these two qualifiers will
  // be inserted/deleted together
  if (rowContent.getValue(HConstants.CATALOG_FAMILY,
      HConstants.MERGEA_QUALIFIER) != null) {
    // wait cleaning merge region first
    return result;
  }
  // Run checks on each daughter split.
  PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
  Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
  Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
  if (hasNoReferences(a) && hasNoReferences(b)) {
    LOG.debug("Deleting region " + parent.getRegionNameAsString() +
      " because daughter splits no longer hold references");
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
    MetaTableAccessor.deleteRegion(this.connection, parent);
    result = true;
  }
  return result;
}
 
Author: fengchen8086, Project: ditb, Lines of code: 36, Source file: CatalogJanitor.java

Example 9: cleanParent

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * If daughters no longer hold reference to the parents, delete the parent.
 * @param server HRegionInterface of meta server to talk to
 * @param parent HRegionInfo of split offlined parent
 * @param rowContent Content of <code>parent</code> row in
 * <code>metaRegionName</code>
 * @return True if we removed <code>parent</code> from meta table and from
 * the filesystem.
 * @throws IOException
 */
boolean cleanParent(final HRegionInfo parent, Result rowContent)
throws IOException {
  boolean result = false;
  // Run checks on each daughter split.
  HRegionInfo a_region = getDaughterRegionInfo(rowContent, HConstants.SPLITA_QUALIFIER);
  HRegionInfo b_region = getDaughterRegionInfo(rowContent, HConstants.SPLITB_QUALIFIER);
  Pair<Boolean, Boolean> a =
    checkDaughterInFs(parent, a_region, HConstants.SPLITA_QUALIFIER);
  Pair<Boolean, Boolean> b =
    checkDaughterInFs(parent, b_region, HConstants.SPLITB_QUALIFIER);
  if (hasNoReferences(a) && hasNoReferences(b)) {
    LOG.debug("Deleting region " + parent.getRegionNameAsString() +
      " because daughter splits no longer hold references");

    // This latter regionOffline should not be necessary but is done for now
    // until we let go of regionserver to master heartbeats.  See HBASE-3368.
    if (this.services.getAssignmentManager() != null) {
      // The mock used in testing catalogjanitor returns null for getAssignmnetManager.
      // Allow for null result out of getAssignmentManager.
      this.services.getAssignmentManager().regionOffline(parent);
    }
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
    MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
    result = true;
  }
  return result;
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 39, Source file: CatalogJanitor.java

Example 10: removeHdfsRegions

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Remove specified regions from the file-system, using the archiver.
 */
private void removeHdfsRegions(final List<HRegionInfo> regions) throws IOException {
  if (regions != null && regions.size() > 0) {
    for (HRegionInfo hri: regions) {
      HFileArchiver.archiveRegion(conf, fs, hri);
    }
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines of code: 11, Source file: RestoreSnapshotHelper.java

Example 11: teardown

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
@After
public void teardown() throws IOException {
  // Pass null table directory path to delete region.
  HFileArchiver.archiveRegion(basedir.getFileSystem(TEST_UTIL.getConfiguration()), basedir, null,
    new Path(FSUtils.getTableDir(basedir, userRegion.getRegionInfo().getTable()), userRegion
        .getRegionInfo().getEncodedName()));
}
 
Author: tenggyut, Project: HIndex, Lines of code: 8, Source file: TestDelete.java

Example 12: removeTableData

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Removes the table from hbase:meta and archives the HDFS files.
 */
protected void removeTableData(final List<HRegionInfo> regions)
throws IOException, CoordinatedStateException {
  // 1. Remove regions from META
  LOG.debug("Deleting regions from META");
  MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);

  // -----------------------------------------------------------------------
  // NOTE: At this point we still have data on disk, but nothing in hbase:meta
  //       if the rename below fails, hbck will report an inconsistency.
  // -----------------------------------------------------------------------

  // 2. Move the table in /hbase/.tmp
  MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
  Path tempTableDir = mfs.moveTableToTemp(tableName);

  // 3. Archive regions from FS (temp directory)
  FileSystem fs = mfs.getFileSystem();
  for (HRegionInfo hri : regions) {
    LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
    HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
        tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
  }

  // 4. Delete table directory from FS (temp directory)
  if (!fs.delete(tempTableDir, true)) {
    LOG.error("Couldn't delete " + tempTableDir);
  }

  LOG.debug("Table '" + tableName + "' archived!");
}
 
Author: grokcoder, Project: pbase, Lines of code: 34, Source file: DeleteTableHandler.java

Example 13: cleanParent

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * If daughters no longer hold reference to the parents, delete the parent.
 * @param parent HRegionInfo of split offlined parent
 * @param rowContent Content of <code>parent</code> row in
 * <code>metaRegionName</code>
 * @return True if we removed <code>parent</code> from meta table and from
 * the filesystem.
 * @throws IOException
 */
boolean cleanParent(final HRegionInfo parent, Result rowContent)
throws IOException {
  boolean result = false;
  // Check whether it is a merged region and not clean reference
  // No necessary to check MERGEB_QUALIFIER because these two qualifiers will
  // be inserted/deleted together
  if (rowContent.getValue(HConstants.CATALOG_FAMILY,
      HConstants.MERGEA_QUALIFIER) != null) {
    // wait cleaning merge region first
    return result;
  }
  // Run checks on each daughter split.
  PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
  Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
  Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
  if (hasNoReferences(a) && hasNoReferences(b)) {
    LOG.debug("Deleting region " + parent.getRegionNameAsString() +
      " because daughter splits no longer hold references");
    FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
    if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
    MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent);
    result = true;
  }
  return result;
}
 
Author: tenggyut, Project: HIndex, Lines of code: 36, Source file: CatalogJanitor.java

Example 14: checkTempDir

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Make sure the hbase temp directory exists and is empty.
 * NOTE that this method is only executed once just after the master becomes the active one.
 */
private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
    throws IOException {
  // If the temp directory exists, clear the content (left over, from the previous run)
  if (fs.exists(tmpdir)) {
    // Archive table in temp, maybe left over from failed deletion,
    // if not the cleaner will take care of them.
    for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
      for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
        HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
      }
    }
    if (!fs.delete(tmpdir, true)) {
      throw new IOException("Unable to clean the temp directory: " + tmpdir);
    }
  }

  // Create the temp directory
  if (isSecurityEnabled) {
    if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
      throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
    }
  } else {
    if (!fs.mkdirs(tmpdir)) {
      throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
    }
  }
}
 
Author: apache, Project: hbase, Lines of code: 32, Source file: MasterFileSystem.java

Example 15: removeTableData

import org.apache.hadoop.hbase.backup.HFileArchiver; // import the package/class the method depends on
/**
 * Removes the table from .META. and archives the HDFS files.
 */
protected void removeTableData(final List<HRegionInfo> regions)
    throws IOException, CoordinatedStateException {
  // 1. Remove regions from META
  LOG.debug("Deleting regions from META");
  MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);

  // -----------------------------------------------------------------------
  // NOTE: At this point we still have data on disk, but nothing in .META.
  //       if the rename below fails, hbck will report an inconsistency.
  // -----------------------------------------------------------------------

  // 2. Move the table in /hbase/.tmp
  MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
  Path tempTableDir = mfs.moveTableToTemp(tableName);

  // 3. Archive regions from FS (temp directory)
  FileSystem fs = mfs.getFileSystem();
  for (HRegionInfo hri: regions) {
    LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
    HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
        tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
  }

  // 4. Delete table directory from FS (temp directory)
  if (!fs.delete(tempTableDir, true)) {
    LOG.error("Couldn't delete " + tempTableDir);
  }

  LOG.debug("Table '" + tableName + "' archived!");
}
 
Author: shenli-uiuc, Project: PyroDB, Lines of code: 34, Source file: DeleteTableHandler.java


Note: The org.apache.hadoop.hbase.backup.HFileArchiver.archiveRegion examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Follow each project's license when redistributing or using the code, and do not republish this article without permission.