

Java FSUtils.setRootDir Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.FSUtils.setRootDir. If you are wondering exactly what FSUtils.setRootDir does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.FSUtils.


The following presents 15 code examples of the FSUtils.setRootDir method, sorted by popularity by default.
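
Before diving into the examples, a minimal, self-contained sketch of what FSUtils.setRootDir does may help: it records the given path under the hbase.rootdir key of the supplied Configuration, which FSUtils.getRootDir later resolves back into a Path. The class name and path below are placeholders chosen for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class SetRootDirDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // setRootDir stores the path under the "hbase.rootdir" key.
    FSUtils.setRootDir(conf, new Path("/tmp/hbase-demo")); // illustrative path
    System.out.println(conf.get("hbase.rootdir"));         // prints /tmp/hbase-demo
    // FSUtils.getRootDir(conf) reads the same key back as a Path,
    // qualified against the configured filesystem.
  }
}

Note that many of the examples below call setRootDir on a copy of the main Configuration, so the override stays local to whichever component (often a WALFactory) receives the copy.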

Example 1: initHRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
    throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());

  HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);

  HRegionFileSystem fs = new FailingHRegionFileSystem(conf, tableDir.getFileSystem(conf), tableDir,
    info);
  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, tableDir);
  final WALFactory wals = new WALFactory(walConf, null, "log_" + replicaId);
  HRegion region = new HRegion(fs, wals.getWAL(info.getEncodedNameAsBytes()), conf, htd, null);

  region.initialize();

  return region;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestStoreFileRefresherChore.java

Example 2: setUpBeforeClass

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start up the mini cluster on top of a 0.94 root.dir that has data from
  // a 0.94 hbase run and see if we can migrate to 0.96
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);

  hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
  fs = FileSystem.get(TEST_UTIL.getConfiguration());
  FSUtils.setRootDir(TEST_UTIL.getConfiguration(), hbaseRootDir);
  zkw = TEST_UTIL.getZooKeeperWatcher();

  Path testdir = TEST_UTIL.getDataTestDir("TestUpgradeTo96");
  // get the untar 0.94 file structure

  set94FSLayout(testdir);
  setUp94Znodes();
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestUpgradeTo96.java

Example 3: setupBeforeClass

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName(), SampleRegionWALObserver.Legacy.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName());
  conf.setBoolean("dfs.support.append", true);
  conf.setInt("dfs.client.block.recovery.retries", 2);

  TEST_UTIL.startMiniCluster(1);
  Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem()
      .makeQualified(new Path("/hbase"));
  LOG.info("hbase.rootdir=" + hbaseRootDir);
  FSUtils.setRootDir(conf, hbaseRootDir);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestWALObserver.java

Example 4: setup

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Before
public void setup() throws Exception {
  rootDir = TEST_UTIL.getDataTestDir("testRestore");
  archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  fs = TEST_UTIL.getTestFileSystem();
  conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, rootDir);
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: TestRestoreSnapshotHelper.java

Example 5: setUp

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Override
public void setUp() throws Exception {
  // setup config values necessary for store
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt("hbase.hstore.compaction.min", minFiles);
  this.conf.setInt("hbase.hstore.compaction.max", maxFiles);
  this.conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, minSize);
  this.conf.setLong("hbase.hstore.compaction.max.size", maxSize);
  this.conf.setFloat("hbase.hstore.compaction.ratio", 1.0F);

  // Setting up a Store
  final String id = TestDefaultCompactSelection.class.getName();
  Path basedir = new Path(DIR);
  final Path logdir = new Path(basedir, DefaultWALProvider.getWALDirectoryName(id));
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);

  fs.delete(logdir, true);

  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);

  final Configuration walConf = new Configuration(conf);
  FSUtils.setRootDir(walConf, basedir);
  wals = new WALFactory(walConf, null, id);
  region = HRegion.createHRegion(info, basedir, conf, htd);
  HRegion.closeHRegion(region);
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, wals.getWAL(info.getEncodedNameAsBytes()), fs, conf, info, htd,
      null);

  store = new HStore(region, hcd, conf);

  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: TestDefaultCompactSelection.java

Example 6: main

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Facility for dumping and compacting catalog tables. It only handles catalog tables,
 * since those are the only tables whose schema we know for certain. For usage run:
 * <pre>
 *   ./bin/hbase org.apache.hadoop.hbase.regionserver.HRegion
 * </pre>
 *
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  if (args.length < 1) {
    printUsageAndExit(null);
  }
  boolean majorCompact = false;
  if (args.length > 1) {
    if (!args[1].toLowerCase().startsWith("major")) {
      printUsageAndExit("ERROR: Unrecognized option <" + args[1] + ">");
    }
    majorCompact = true;
  }
  final Path tableDir = new Path(args[0]);
  final Configuration c = HBaseConfiguration.create();
  final FileSystem fs = FileSystem.get(c);
  final Path logdir = new Path(c.get("hbase.tmp.dir"));
  final String logname = "wal" + FSUtils.getTableName(tableDir) + System.currentTimeMillis();

  final Configuration walConf = new Configuration(c);
  FSUtils.setRootDir(walConf, logdir);
  final WALFactory wals = new WALFactory(walConf, null, logname);
  try {
    processTable(fs, tableDir, wals, c, majorCompact);
  } finally {
    wals.close();
    // TODO: is this still right?
    BlockCache bc = new CacheConfig(c).getBlockCache();
    if (bc != null) bc.shutdown();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 39, Source: HRegion.java
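
A pattern recurs across Examples 1, 5, 6, and 15: the WAL root is redirected on a copy of the Configuration, so hbase.rootdir is overridden only for the WALFactory while the caller's conf is left untouched. A minimal sketch of that pattern, assuming an illustrative path and factory id:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalConfSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy first: the WAL root override must not leak into the main conf.
    final Configuration walConf = new Configuration(conf);
    FSUtils.setRootDir(walConf, new Path("/tmp/wal-root")); // illustrative path
    final WALFactory wals = new WALFactory(walConf, null, "demo-wal");
    try {
      // ... obtain WALs via wals.getWAL(...) as in the examples above ...
    } finally {
      wals.close();
    }
  }
}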

Example 7: setUpBeforeClass

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.setBoolean("dfs.support.append", true);
  // The config below is supported by 0.20-append and CDH3b2
  conf.setInt("dfs.client.block.recovery.retries", 2);
  TEST_UTIL.startMiniCluster(3);
  Path hbaseRootDir =
    TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
  LOG.info("hbase.rootdir=" + hbaseRootDir);
  FSUtils.setRootDir(conf, hbaseRootDir);
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: TestWALReplay.java

Example 8: setUpBeforeClass

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
  conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
    WAL.Reader.class);
  conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
    WALProvider.Writer.class);
  conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: TestSecureWAL.java

Example 9: setUpBeforeClass

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration c = TESTUTIL.getConfiguration();
  // We use the local filesystem. Set it so it writes into the testdir.
  FSUtils.setRootDir(c, TESTUTIL.getDataTestDir());
  DefaultMetricsSystem.setMiniClusterMode(true);
  // Startup a mini zk cluster.
  TESTUTIL.startMiniZKCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: TestMasterNoCluster.java

Example 10: testFindsSnapshotFilesWhenCleaning

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  byte[] snapshot = Bytes.toBytes(snapshotName);
  TableName tableName = TableName.valueOf("table");
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  HRegionInfo mockRegion = new HRegionInfo(tableName);
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Developer: fengchen8086, Project: ditb, Lines: 34, Source: TestSnapshotHFileCleaner.java

Example 11: testFindsSnapshotFilesWhenCleaning

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);
  SnapshotLogCleaner cleaner = new SnapshotLogCleaner();
  cleaner.setConf(conf);

  // write a log file into the snapshot's WAL directory
  String snapshotName = "snapshot";
  byte[] snapshot = Bytes.toBytes(snapshotName);
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  Path snapshotLogDir = new Path(snapshotDir, HConstants.HREGION_LOGDIR_NAME);
  String timestamp = "1339643343027";
  String hostFromMaster = "localhost%2C59648%2C1339643336601";

  Path hostSnapshotLogDir = new Path(snapshotLogDir, hostFromMaster);
  String snapshotlogfile = hostFromMaster + "." + timestamp + ".hbase";

  // add the reference to log in the snapshot
  fs.create(new Path(hostSnapshotLogDir, snapshotlogfile));

  // now check to see if that log file would get deleted.
  Path oldlogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  Path logFile = new Path(oldlogDir, snapshotlogfile);
  fs.create(logFile);

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(logFile)));
}
 
Developer: fengchen8086, Project: ditb, Lines: 32, Source: TestSnapshotLogCleaner.java

Example 12: testHFileLink

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public void testHFileLink() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
  // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();

  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);

  Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // Try to open store file from link
  StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
  StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
    BloomType.NONE);
  assertTrue(storeFileInfo.isLink());

  // Now confirm that I can read from the link
  int count = 1;
  HFileScanner s = hsf.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: TestStoreFile.java

Example 13: main

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Main program
 *
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {

  // create a fsck object
  Configuration conf = HBaseConfiguration.create();
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyway.
  FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
  HBaseFsck fsck = new HBaseFsck(conf);
  boolean fixHoles = false;

  // Process command-line args.
  for (int i = 0; i < args.length; i++) {
    String cmd = args[i];
    if (cmd.equals("-details")) {
      fsck.setDisplayFullReport();
    } else if (cmd.equals("-base")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -base needs an HDFS path.");
        printUsageAndExit();
      }
      // update hbase root dir to user-specified base
      i++;
      FSUtils.setRootDir(conf, new Path(args[i]));
      FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
    } else if (cmd.equals("-sidelineDir")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
        printUsageAndExit();
      }
      // set the hbck sideline dir to user-specified one
      i++;
      fsck.setSidelineDir(args[i]);
    } else if (cmd.equals("-fixHoles")) {
      fixHoles = true;
    } else if (cmd.equals("-fix")) {
      // make all fix options true
      fixHoles = true;
    } else {
      String str = "Unknown command line option : " + cmd;
      LOG.info(str);
      System.out.println(str);
      printUsageAndExit();
    }
  }

  System.out.println("OfflineMetaRepair command line options: " + StringUtils.join(args, " "));

  // Fsck doesn't shut down and doesn't provide a way to shut down its
  // threads cleanly, so we do a System.exit.
  boolean success = false;
  try {
    success = fsck.rebuildMeta(fixHoles);
  } catch (MultipleIOException mioes) {
    for (IOException ioe : mioes.getExceptions()) {
      LOG.error("Bailed out due to:", ioe);
    }
  } catch (Exception e) {
    LOG.error("Bailed out due to: ", e);
  } finally {
    System.exit(success ? 0 : 1);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 69, Source: OfflineMetaRepair.java

Example 14: testRegionReplicaSecondaryIsReadOnly

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Test
public void testRegionReplicaSecondaryIsReadOnly() throws IOException {
  // create a primary region, load some data and flush
  // create a secondary region, and do a put against that
  Path rootDir = new Path(dir + "testRegionReplicaSecondary");
  FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootDir);

  byte[][] families = new byte[][] {
      Bytes.toBytes("cf1"), Bytes.toBytes("cf2"), Bytes.toBytes("cf3")
  };
  byte[] cq = Bytes.toBytes("cq");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testRegionReplicaSecondary"));
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }

  long time = System.currentTimeMillis();
  HRegionInfo primaryHri = new HRegionInfo(htd.getTableName(),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
    false, time, 0);
  HRegionInfo secondaryHri = new HRegionInfo(htd.getTableName(),
    HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
    false, time, 1);

  HRegion primaryRegion = null, secondaryRegion = null;

  try {
    primaryRegion = HRegion.createHRegion(primaryHri,
      rootDir, TEST_UTIL.getConfiguration(), htd);

    // load some data
    putData(primaryRegion, 0, 1000, cq, families);

    // flush region
    primaryRegion.flush(true);

    // open secondary region
    secondaryRegion = HRegion.openHRegion(rootDir, secondaryHri, htd, null, CONF);

    try {
      putData(secondaryRegion, 0, 1000, cq, families);
      fail("Should have thrown exception");
    } catch (IOException ex) {
      // expected
    }
  } finally {
    if (primaryRegion != null) {
      HRegion.closeHRegion(primaryRegion);
    }
    if (secondaryRegion != null) {
      HRegion.closeHRegion(secondaryRegion);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 55, Source: TestHRegion.java

Example 15: testGetSplitEditFilesSorted

import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Assert that getSplitEditFilesSorted returns files in expected order and
 * that it skips moved-aside files.
 * @throws IOException
 */
@Test public void testGetSplitEditFilesSorted() throws IOException {
  FileSystem fs = FileSystem.get(util.getConfiguration());
  Path regiondir = util.getDataTestDir("regiondir");
  fs.delete(regiondir, true);
  fs.mkdirs(regiondir);
  Path recoverededits = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
  String first = WALSplitter.formatRecoveredEditsFileName(-1);
  createFile(fs, recoverededits, first);
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(0));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(1));
  createFile(fs, recoverededits, WALSplitter
      .formatRecoveredEditsFileName(11));
  createFile(fs, recoverededits, WALSplitter.formatRecoveredEditsFileName(2));
  createFile(fs, recoverededits, WALSplitter
      .formatRecoveredEditsFileName(50));
  String last = WALSplitter.formatRecoveredEditsFileName(Long.MAX_VALUE);
  createFile(fs, recoverededits, last);
  createFile(fs, recoverededits,
    Long.toString(Long.MAX_VALUE) + "." + System.currentTimeMillis());

  final Configuration walConf = new Configuration(util.getConfiguration());
  FSUtils.setRootDir(walConf, regiondir);
  (new WALFactory(walConf, null, "dummyLogName")).getWAL(new byte[]{});

  NavigableSet<Path> files = WALSplitter.getSplitEditFilesSorted(fs, regiondir);
  assertEquals(7, files.size());
  assertEquals(files.pollFirst().getName(), first);
  assertEquals(files.pollLast().getName(), last);
  assertEquals(files.pollFirst().getName(),
    WALSplitter
      .formatRecoveredEditsFileName(0));
  assertEquals(files.pollFirst().getName(),
    WALSplitter
      .formatRecoveredEditsFileName(1));
  assertEquals(files.pollFirst().getName(),
    WALSplitter
      .formatRecoveredEditsFileName(2));
  assertEquals(files.pollFirst().getName(),
    WALSplitter
      .formatRecoveredEditsFileName(11));
}
 
Developer: fengchen8086, Project: ditb, Lines: 47, Source: TestWALMethods.java


Note: The org.apache.hadoop.hbase.util.FSUtils.setRootDir method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are drawn from community-contributed open-source projects, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not republish without permission.