

Java FSUtils.getRootDir Method Code Examples

This article collects typical usage examples of the FSUtils.getRootDir method from the Java class org.apache.hadoop.hbase.util.FSUtils. If you have been wondering what FSUtils.getRootDir does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.FSUtils.


The following 15 code examples of the FSUtils.getRootDir method are shown below, sorted by popularity by default.
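Before the individual examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: resolve the configured hbase.rootdir with FSUtils.getRootDir, then obtain the FileSystem from the returned Path rather than from fs.defaultFS, since the two may refer to different filesystems. This is only an illustrative sketch, not taken from any project below; the class name RootDirExample is invented here, and it assumes an HBase 1.x-era classpath (as used by the ditb examples), where FSUtils.getRootDir is available.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class RootDirExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Resolve hbase.rootdir as a fully qualified Path.
    Path rootDir = FSUtils.getRootDir(conf);
    // Derive the FileSystem from the root dir itself; fs.defaultFS may
    // point at a different filesystem (e.g. local fs vs. HDFS).
    FileSystem fs = rootDir.getFileSystem(conf);
    System.out.println("hbase.rootdir = " + rootDir + " (" + fs.getUri() + ")");
  }
}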

Example 1: doOfflineLogSplitting

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
/**
 * Performs log splitting for all regionserver directories.
 * @throws Exception
 */
private void doOfflineLogSplitting() throws Exception {
  LOG.info("Starting Log splitting");
  final Path rootDir = FSUtils.getRootDir(getConf());
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  // since this is the singleton, we needn't close it.
  final WALFactory factory = WALFactory.getInstance(getConf());
  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
  Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
  FileStatus[] regionServerLogDirs = FSUtils.listStatus(fs, logDir);
  if (regionServerLogDirs == null || regionServerLogDirs.length == 0) {
    LOG.info("No log directories to split, returning");
    return;
  }
  try {
    for (FileStatus regionServerLogDir : regionServerLogDirs) {
      // split its log dir, if exists
      WALSplitter.split(rootDir, regionServerLogDir.getPath(), oldLogDir, fs, getConf(), factory);
    }
    LOG.info("Successfully completed Log splitting");
  } catch (Exception e) {
    LOG.error("Got exception while doing Log splitting ", e);
    throw e;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: UpgradeTo96.java

Example 2: testRewritingClusterIdToPB

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
@Test
public void testRewritingClusterIdToPB() throws Exception {
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);
  TEST_UTIL.createRootDir();
  TEST_UTIL.getConfiguration().setBoolean("hbase.replication", true);
  Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
  Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
  FSDataOutputStream s = null;
  try {
    s = fs.create(filePath);
    s.writeUTF(UUID.randomUUID().toString());
  } finally {
    if (s != null) {
      s.close();
    }
  }
  TEST_UTIL.startMiniHBaseCluster(1, 1);
  HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
  assertEquals(1, master.getServerManager().getOnlineServersList().size());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: TestClusterId.java

Example 3: MasterFileSystem

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
public MasterFileSystem(Server master, MasterServices services)
throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager =
      new SplitLogManager(master, master.getConfiguration(), master, services,
          master.getServerName());
  this.distributedLogReplay = this.splitLogManager.isLogReplaying();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: MasterFileSystem.java

Example 4: setConf

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
/**
 * This method should only be called <b>once</b>, as it starts a thread to keep the cache
 * up-to-date.
 * <p>
 * {@inheritDoc}
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  try {
    long cacheRefreshPeriod = conf.getLong(
      WAL_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_WAL_CACHE_REFRESH_PERIOD);
    final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
        "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
          public Collection<String> filesUnderSnapshot(final Path snapshotDir)
              throws IOException {
            return SnapshotReferenceUtil.getWALNames(fs, snapshotDir);
          }
        });
  } catch (IOException e) {
    LOG.error("Failed to create snapshot log cleaner", e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: SnapshotLogCleaner.java

Example 5: setConf

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
public void setConf(final Configuration conf) {
  super.setConf(conf);
  try {
    long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
      DEFAULT_HFILE_CACHE_REFRESH_PERIOD);
    final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
        "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
          public Collection<String> filesUnderSnapshot(final Path snapshotDir)
              throws IOException {
            return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
          }
        });
  } catch (IOException e) {
    LOG.error("Failed to create cleaner util", e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: SnapshotHFileCleaner.java

Example 6: createRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
protected HRegionInfo createRegion(Configuration conf, final Table htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 23, Source: OfflineMetaRebuildTestCore.java

Example 7: getSplits

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
public static List<InputSplit> getSplits(Configuration conf) throws IOException {
  String snapshotName = getSnapshotName(conf);

  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);

  SnapshotManifest manifest = getSnapshotManifest(conf, snapshotName, rootDir, fs);

  List<HRegionInfo> regionInfos = getRegionInfosFromManifest(manifest);

  // TODO: mapred does not support scan as input API. Work around for now.
  Scan scan = extractScanFromConf(conf);
  // the temp dir where the snapshot is restored
  Path restoreDir = new Path(conf.get(RESTORE_DIR_KEY));

  return getSplits(scan, manifest, regionInfos, restoreDir, conf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 18, Source: TableSnapshotInputFormatImpl.java

Example 8: setUp

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
/**
 * Note that this method must be called after the mini hdfs cluster has
 * started or we end up with a local file system.
 */
@Override
protected void setUp() throws Exception {
  super.setUp();
  localfs =
    (conf.get("fs.defaultFS", "file:///").compareTo("file:///") == 0);

  if (fs == null) {
    this.fs = FileSystem.get(conf);
  }
  try {
    if (localfs) {
      this.testDir = getUnitTestdir(getName());
      if (fs.exists(testDir)) {
        fs.delete(testDir, true);
      }
    } else {
      this.testDir = FSUtils.getRootDir(conf);
    }
  } catch (Exception e) {
    LOG.fatal("error during setup", e);
    throw e;
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 28, Source: HBaseTestCase.java

Example 9: warmupHRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
public static void warmupHRegion(final HRegionInfo info, final HTableDescriptor htd,
    final WAL wal, final Configuration conf, final RegionServerServices rsServices,
    final CancelableProgressable reporter) throws IOException {

  if (info == null) throw new NullPointerException("Passed region info is null");

  if (LOG.isDebugEnabled()) {
    LOG.debug("HRegion.Warming up region: " + info);
  }

  Path rootDir = FSUtils.getRootDir(conf);
  Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());

  FileSystem fs = null;
  if (rsServices != null) {
    fs = rsServices.getFileSystem();
  }
  if (fs == null) {
    fs = FileSystem.get(conf);
  }

  HRegion r = HRegion.newHRegion(tableDir, wal, fs, conf, info, htd, null);
  r.initializeWarmup(reporter);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: HRegion.java

Example 10: testFindsSnapshotFilesWhenCleaning

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  byte[] snapshot = Bytes.toBytes(snapshotName);
  TableName tableName = TableName.valueOf("table");
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  HRegionInfo mockRegion = new HRegionInfo(tableName);
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 34, Source: TestSnapshotHFileCleaner.java

Example 11: testSyncRunnerIndexOverflow

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
@Test
public void testSyncRunnerIndexOverflow() throws IOException, NoSuchFieldException,
    SecurityException, IllegalArgumentException, IllegalAccessException {
  final String name = "testSyncRunnerIndexOverflow";
  FSHLog log =
      new FSHLog(fs, FSUtils.getRootDir(conf), name, HConstants.HREGION_OLDLOGDIR_NAME, conf,
          null, true, null, null);
  try {
    Field ringBufferEventHandlerField = FSHLog.class.getDeclaredField("ringBufferEventHandler");
    ringBufferEventHandlerField.setAccessible(true);
    FSHLog.RingBufferEventHandler ringBufferEventHandler =
        (FSHLog.RingBufferEventHandler) ringBufferEventHandlerField.get(log);
    Field syncRunnerIndexField =
        FSHLog.RingBufferEventHandler.class.getDeclaredField("syncRunnerIndex");
    syncRunnerIndexField.setAccessible(true);
    syncRunnerIndexField.set(ringBufferEventHandler, Integer.MAX_VALUE - 1);
    HTableDescriptor htd =
        new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
    HRegionInfo hri =
        new HRegionInfo(htd.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    for (int i = 0; i < 10; i++) {
      addEdits(log, hri, htd, 1, mvcc);
    }
  } finally {
    log.close();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 29, Source: TestFSHLog.java

Example 12: setUp

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(this.conf);
  this.oldLogDir = new Path(this.hbaseRootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  this.logName = DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName() + "-manual");
  this.logDir = new Path(this.hbaseRootDir, logName);
  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.mode = (conf.getBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false) ?
      RecoveryMode.LOG_REPLAY : RecoveryMode.LOG_SPLITTING);
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 16, Source: TestWALReplay.java

Example 13: createTableAndSnapshot

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
public static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
    String snapshotName, int numRegions)
    throws Exception {
  try {
    util.deleteTable(tableName);
  } catch(Exception ex) {
    // ignore
  }

  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, bbb, yyy, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getHBaseAdmin();

  // put some stuff in the table
  HTable table = new HTable(util.getConfiguration(), tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
      Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 35, Source: TestTableSnapshotScanner.java

Example 14: cleanup

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
@AfterClass
public static void cleanup() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);
  // cleanup
  fs.delete(rootDir, true);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 9, Source: TestSnapshotHFileCleaner.java

Example 15: getSnapshotList

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class this method depends on
/**
 * Returns the list of available snapshots in the specified location
 * @param conf the {@link Configuration} to use
 * @return the list of snapshots
 */
public static List<SnapshotDescription> getSnapshotList(final Configuration conf)
    throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  FileStatus[] snapshots = fs.listStatus(snapshotDir,
    new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
  List<SnapshotDescription> snapshotLists =
    new ArrayList<SnapshotDescription>(snapshots.length);
  for (FileStatus snapshotDirStat: snapshots) {
    snapshotLists.add(SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath()));
  }
  return snapshotLists;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: SnapshotInfo.java


Note: The org.apache.hadoop.hbase.util.FSUtils.getRootDir method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the programming community, and copyright remains with the original authors. Please consult the corresponding project's License before distributing or using this code; do not reproduce without permission.