

Java HFileCleaner Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.master.cleaner.HFileCleaner. If you are wondering what HFileCleaner is for and how to use it, the curated examples below should help.


The HFileCleaner class belongs to the org.apache.hadoop.hbase.master.cleaner package. The 15 code examples below are drawn from open-source projects and are sorted by popularity by default.
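
As a quick orientation before the examples: below is a minimal, hypothetical sketch of standing up an HFileCleaner against an archive directory. The constructor signature (interval, Stoppable, Configuration, FileSystem, archive path) and the ChoreService scheduling mirror the test examples in this article; the archive path and chore-service name here are made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;

public class HFileCleanerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical archive location; production code derives it via
    // HFileArchiveUtil.getArchivePath(conf), as the HMaster examples below do
    Path archiveDir = new Path("/hbase/archive");

    // Simple Stoppable, standing in for the StoppableImplementation used in the tests below
    Stoppable stop = new Stoppable() {
      private volatile boolean stopped = false;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Scan the archive every 1000 ms, as in the test examples below
    HFileCleaner cleaner = new HFileCleaner(1000, stop, conf, fs, archiveDir);
    new ChoreService("HFILE_CLEANER_SKETCH").scheduleChore(cleaner);
  }
}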

Example 1: turnOnArchiving

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
/**
 * Start archiving the given table and wait for the given hfile cleaner to notice
 * @param tableName table to archive
 * @param cleaner cleaner to check to make sure the change propagated
 * @return the cleaner's delegates, including the {@link LongTermArchivingHFileCleaner}
 *         that manages archiving
 * @throws IOException on failure
 * @throws KeeperException on failure
 */
private List<BaseHFileCleanerDelegate> turnOnArchiving(String tableName, HFileCleaner cleaner)
    throws IOException, KeeperException {
  // turn on hfile retention
  LOG.debug("----Starting archiving for table:" + tableName);
  archivingClient.enableHFileBackupAsync(Bytes.toBytes(tableName));
  assertTrue("Archving didn't get turned on", archivingClient.getArchivingEnabled(tableName));

  // wait for the archiver to get the notification
  List<BaseHFileCleanerDelegate> cleaners = cleaner.getDelegatesForTesting();
  LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);
  while (!delegate.archiveTracker.keepHFiles(STRING_TABLE_NAME)) {
    // spin until propagation - should be fast
  }
  return cleaners;
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestZooKeeperTableArchiveClient.java

Example 2: setConf

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
@Override
public void setConf(Configuration config) {
  // If either replication or replication of bulk load hfiles is disabled, keep all members null
  if (!(config.getBoolean(
    HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
    HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT))) {
    LOG.warn(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY
        + " is not enabled. Better to remove "
        + ReplicationHFileCleaner.class + " from " + HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS
        + " configuration.");
    return;
  }
  // Make my own Configuration. Then I'll have my own connection to zk that
  // I can close myself when time comes.
  Configuration conf = new Configuration(config);
  try {
    setConf(conf, new ZKWatcher(conf, "replicationHFileCleaner", null));
  } catch (IOException e) {
    LOG.error("Error while configuring " + this.getClass().getName(), e);
  }
}
 
Developer: apache, Project: hbase, Lines: 22, Source: ReplicationHFileCleaner.java
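
For context: as the guard in setConf() above shows, ReplicationHFileCleaner is only useful when replication of bulk-loaded hfiles is enabled. Here is a hedged sketch of the corresponding configuration, using only the keys referenced in the example; the wrapper class and method name are illustrative, and the ReplicationHFileCleaner import assumes the usual org.apache.hadoop.hbase.replication.master package.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;

public class ReplicationCleanerConfigSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Without this flag, setConf() above logs a warning and keeps the cleaner inert
    conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
    // Register the cleaner as a master hfile-cleaner plugin
    conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
        ReplicationHFileCleaner.class.getCanonicalName());
    return conf;
  }
}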

Example 3: decorateMasterConfiguration

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
/**
 * This method modifies the master's configuration in order to inject replication-related features
 */
@VisibleForTesting
public static void decorateMasterConfiguration(Configuration conf) {
  String plugins = conf.get(HBASE_MASTER_LOGCLEANER_PLUGINS);
  String cleanerClass = ReplicationLogCleaner.class.getCanonicalName();
  if (!plugins.contains(cleanerClass)) {
    conf.set(HBASE_MASTER_LOGCLEANER_PLUGINS, plugins + "," + cleanerClass);
  }
  if (ReplicationUtils.isReplicationForBulkLoadDataEnabled(conf)) {
    plugins = conf.get(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
    cleanerClass = ReplicationHFileCleaner.class.getCanonicalName();
    if (!plugins.contains(cleanerClass)) {
      conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, plugins + "," + cleanerClass);
    }
  }
}
 
Developer: apache, Project: hbase, Lines: 19, Source: HMaster.java

Example 4: startServiceThreads

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
private void startServiceThreads() throws IOException {
  // Start the executor service pools
  this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
      conf.getInt("hbase.master.executor.logreplayops.threads", 10));

  // We depend on there being only one instance of this executor running
  // at a time. To do concurrency, would need fencing of enable/disable of
  // tables.
  // Any time changing this maxThreads to > 1, pls see the comment at
  // AccessController#postCreateTableHandler
  this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);
  startProcedureExecutor();

  // Start log cleaner thread
  int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  getChoreService().scheduleChore(logCleaner);

  // start the hfile archive cleaner thread
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archiveDir);
  getChoreService().scheduleChore(hfileCleaner);
  serviceStarted = true;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Started service threads");
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: HMaster.java

Example 5: testArchivingOnSingleTable

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
@Test (timeout=300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);

  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  Region region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);

  loadFlushAndCompact(region, TEST_FAM);

  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());

  runCleaner(cleaner, finished, stop);

  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);

  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
 
Developer: fengchen8086, Project: ditb, Lines: 40, Source: TestZooKeeperTableArchiveClient.java

Example 6: runCleaner

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
/**
 * Schedule the given cleaner, wait for it to check all the files, then stop it.
 */
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
    throws InterruptedException {
  final ChoreService choreService = new ChoreService("CLEANER_SERVER_NAME");
  // run the cleaner
  choreService.scheduleChore(cleaner);
  // wait for the cleaner to check all the files
  finished.await();
  // stop the cleaner
  stop.stop("");
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestZooKeeperTableArchiveClient.java

Example 7: testArchivingOnSingleTable

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
@Test (timeout=300000)
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);

  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);

  loadFlushAndCompact(region, TEST_FAM);

  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());

  runCleaner(cleaner, finished, stop);

  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);

  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
 
Developer: grokcoder, Project: pbase, Lines: 40, Source: TestZooKeeperTableArchiveClient.java

Example 8: runCleaner

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
/**
 * Start the given cleaner, wait for it to check all the files, then stop it.
 */
private void runCleaner(HFileCleaner cleaner, CountDownLatch finished, Stoppable stop)
    throws InterruptedException {
  // run the cleaner
  cleaner.start();
  // wait for the cleaner to check all the files
  finished.await();
  // stop the cleaner
  stop.stop("");
}
 
Developer: grokcoder, Project: pbase, Lines: 13, Source: TestZooKeeperTableArchiveClient.java

Example 9: startServiceThreads

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
private void startServiceThreads() throws IOException {
  // Start the executor service pools
  this.service.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));
  this.service.startExecutorService(ExecutorType.M_LOG_REPLAY_OPS,
      conf.getInt("hbase.master.executor.logreplayops.threads", 10));

  // We depend on there being only one instance of this executor running
  // at a time. To do concurrency, would need fencing of enable/disable of
  // tables.
  this.service.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

  // Start log cleaner thread
  int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  Threads.setDaemonThreadRunning(logCleaner.getThread(), getName() + ".oldLogCleaner");

  // start the hfile archive cleaner thread
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archiveDir);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(), getName() + ".archivedHFileCleaner");

  serviceStarted = true;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Started service threads");
  }
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines: 39, Source: HMaster.java

Example 10: testArchivingOnSingleTable

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
@Test
public void testArchivingOnSingleTable() throws Exception {
  createArchiveDirectory();
  FileSystem fs = UTIL.getTestFileSystem();
  Path archiveDir = getArchiveDir();
  Path tableDir = getTableDir(STRING_TABLE_NAME);
  toCleanup.add(archiveDir);
  toCleanup.add(tableDir);

  Configuration conf = UTIL.getConfiguration();
  // setup the delegate
  Stoppable stop = new StoppableImplementation();
  HFileCleaner cleaner = setupAndCreateCleaner(conf, fs, archiveDir, stop);
  List<BaseHFileCleanerDelegate> cleaners = turnOnArchiving(STRING_TABLE_NAME, cleaner);
  final LongTermArchivingHFileCleaner delegate = (LongTermArchivingHFileCleaner) cleaners.get(0);

  // create the region
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM);
  HRegion region = UTIL.createTestRegion(STRING_TABLE_NAME, hcd);

  loadFlushAndCompact(region, TEST_FAM);

  // get the current hfiles in the archive directory
  List<Path> files = getAllFiles(fs, archiveDir);
  if (files == null) {
    FSUtils.logFileSystemState(fs, UTIL.getDataTestDir(), LOG);
    throw new RuntimeException("Didn't archive any files!");
  }
  CountDownLatch finished = setupCleanerWatching(delegate, cleaners, files.size());

  runCleaner(cleaner, finished, stop);

  // know the cleaner ran, so now check all the files again to make sure they are still there
  List<Path> archivedFiles = getAllFiles(fs, archiveDir);
  assertEquals("Archived files changed after running archive cleaner.", files, archivedFiles);

  // but we still have the archive directory
  assertTrue(fs.exists(HFileArchiveUtil.getArchivePath(UTIL.getConfiguration())));
}
 
Developer: daidong, Project: DominoHBase, Lines: 40, Source: TestZooKeeperTableArchiveClient.java

Example 11: getHFileCleaner

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
public HFileCleaner getHFileCleaner() {
  return this.hfileCleaner;
}
 
Developer: fengchen8086, Project: ditb, Lines: 4, Source: HMaster.java

Example 12: checkSnapshotSupport

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
/**
 * Called at startup to verify that snapshot operations are supported, and to avoid
 * starting the master if snapshots are present but the required cleaners are missing.
 * Otherwise we can end up with snapshot data loss.
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and
 *         there are snapshots in the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
    throws IOException, UnsupportedOperationException {
  // Verify if snapshot is disabled by the user
  String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
  boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
  boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);

  // Extract cleaners from conf
  Set<String> hfileCleaners = new HashSet<String>();
  String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);

  Set<String> logCleaners = new HashSet<String>();
  cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(logCleaners, cleaners);

  // check if an older version of snapshot directory was present
  Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
  FileSystem fs = mfs.getFileSystem();
  List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir));
  if (ss != null && !ss.isEmpty()) {
    LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
    LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
  }

  // If the user has enabled the snapshot, we force the cleaners to be present
  // otherwise we still need to check if cleaners are enabled or not and verify
  // that there're no snapshot in the .snapshot folder.
  if (snapshotEnabled) {
    // Inject snapshot cleaners, if snapshot.enable is true
    hfileCleaners.add(SnapshotHFileCleaner.class.getName());
    hfileCleaners.add(HFileLinkCleaner.class.getName());
    logCleaners.add(SnapshotLogCleaner.class.getName());

    // Set cleaners conf
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      hfileCleaners.toArray(new String[hfileCleaners.size()]));
    conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
      logCleaners.toArray(new String[logCleaners.size()]));
  } else {
    // Verify if cleaners are present
    snapshotEnabled = logCleaners.contains(SnapshotLogCleaner.class.getName()) &&
      hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
      hfileCleaners.contains(HFileLinkCleaner.class.getName());

    // Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
    if (snapshotEnabled) {
      LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " +
        "but the '" + HBASE_SNAPSHOT_ENABLED + "' property " +
        (userDisabled ? "is set to 'false'." : "is not set."));
    }
  }

  // Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
  this.isSnapshotSupported = snapshotEnabled && !userDisabled;

  // If cleaners are not enabled, verify that there're no snapshot in the .snapshot folder
  // otherwise we end up with snapshot data loss.
  if (!snapshotEnabled) {
    LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
    if (fs.exists(snapshotDir)) {
      FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
      if (snapshots != null) {
        LOG.error("Snapshots are present, but cleaners are not enabled.");
        checkSnapshotSupport();
      }
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 82, Source: SnapshotManager.java
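
To make the expectations of checkSnapshotSupport() concrete, here is a hedged configuration sketch that enables snapshots and supplies the cleaners the method injects or verifies. The "hbase.snapshot.enabled" key string and the cleaner classes are taken from the example above; the snapshot-cleaner imports assume the 1.x package layout (org.apache.hadoop.hbase.master.snapshot), and the wrapper class is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner;

public class SnapshotSupportConfigSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key string behind HBASE_SNAPSHOT_ENABLED in the example above
    conf.setBoolean("hbase.snapshot.enabled", true);
    // The hfile cleaners checkSnapshotSupport() injects when snapshots are enabled
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
        SnapshotHFileCleaner.class.getName(),
        HFileLinkCleaner.class.getName());
    // ...and the matching log cleaner
    conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
        SnapshotLogCleaner.class.getName());
    return conf;
  }
}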

Example 13: setupAndCreateCleaner

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
private HFileCleaner setupAndCreateCleaner(Configuration conf, FileSystem fs, Path archiveDir,
    Stoppable stop) {
  conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
    LongTermArchivingHFileCleaner.class.getCanonicalName());
  return new HFileCleaner(1000, stop, conf, fs, archiveDir);
}
 
Developer: fengchen8086, Project: ditb, Lines: 7, Source: TestZooKeeperTableArchiveClient.java

Example 14: testCleaningRace

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
      TableName.valueOf("table")), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
  try {
    choreService.scheduleChore(cleaner);

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir,  String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded, the file is no longer in the original location
        // but it's in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver is unable to archive the file. Probably HBASE-7643 race condition.
        // in this case, the file should not be archived, and we should have the file
        // in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Avoid having this file in the next run
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.cancel(true);
    choreService.shutdown();
    fs.delete(rootDir, true);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 66, Source: TestHFileArchiving.java

Example 15: startServiceThreads

import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; // import the required package/class
private void startServiceThreads() throws IOException {
  // Start the executor service pools
  this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
      conf.getInt("hbase.master.executor.openregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_CLOSE_REGION,
      conf.getInt("hbase.master.executor.closeregion.threads", 5));
  this.executorService.startExecutorService(ExecutorType.MASTER_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 3));
  this.executorService.startExecutorService(ExecutorType.MASTER_META_SERVER_OPERATIONS,
      conf.getInt("hbase.master.executor.serverops.threads", 5));

  // We depend on there being only one instance of this executor running
  // at a time. To do concurrency, would need fencing of enable/disable of
  // tables.
  this.executorService.startExecutorService(ExecutorType.MASTER_TABLE_OPERATIONS, 1);

  // Start log cleaner thread
  String n = Thread.currentThread().getName();
  int cleanerInterval = conf.getInt("hbase.master.cleaner.interval", 60 * 1000);
  this.logCleaner = new LogCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), getMasterFileSystem().getOldLogDir());
  Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");

  // start the hfile archive cleaner thread
  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  this.hfileCleaner = new HFileCleaner(cleanerInterval, this, conf,
      getMasterFileSystem().getFileSystem(), archiveDir);
  Threads.setDaemonThreadRunning(hfileCleaner.getThread(), n + ".archivedHFileCleaner");

  // Start the health checker
  if (this.healthCheckChore != null) {
    Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
  }

  // Start allowing requests to happen.
  this.rpcServer.openServer();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Started service threads");
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 45, Source: HMaster.java


Note: The org.apache.hadoop.hbase.master.cleaner.HFileCleaner class examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.