Java FSUtils Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.util.FSUtils. If you are wondering what the FSUtils class does, how to use it, or where to find working examples, the curated code samples below should help.


The FSUtils class belongs to the org.apache.hadoop.hbase.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
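
Before the examples, here is a minimal sketch (not taken from the projects below) of the most common FSUtils entry points. It assumes an HBase 1.x-era classpath where these static helpers still live on FSUtils (in later HBase versions much of this moved to CommonFSUtils), and the table name "t1" is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSUtils;

public class FSUtilsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // root of the HBase data layout (hbase.rootdir)
    Path rootDir = FSUtils.getRootDir(conf);
    // the FileSystem backing that root directory
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    // directory holding the regions of the (hypothetical) table "t1"
    Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("t1"));
    System.out.println(tableDir + " exists: " + fs.exists(tableDir));
  }
}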

Example 1: deleteFamilyFromFS

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)
    throws IOException {
  // archive family store files
  Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
  HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);

  // delete the family folder
  Path familyDir = new Path(tableDir,
    new Path(region.getEncodedName(), Bytes.toString(familyName)));
  if (!fs.delete(familyDir, true)) {
    if (fs.exists(familyDir)) {
      throw new IOException("Could not delete family "
          + Bytes.toString(familyName) + " from FileSystem for region "
          + region.getRegionNameAsString() + "(" + region.getEncodedName()
          + ")");
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: MasterFileSystem.java
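
In practice this code path is usually reached through the client Admin API rather than by calling deleteFamilyFromFS directly. A hedged sketch (HBase 1.x client API from org.apache.hadoop.hbase.client; the table and family names are hypothetical):

// dropping a column family via Admin eventually drives the
// archive-then-delete path shown above
Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Admin admin = connection.getAdmin()) {
  admin.deleteColumn(TableName.valueOf("t1"), Bytes.toBytes("cf"));
}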

Example 2: initialize

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);

  // the region is immutable, so READ_UNCOMMITTED is safe here and we avoid
  // having to set a per-thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);

  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TableSnapshotInputFormatImpl.java
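
Once initialize has run, the enclosing record reader typically drains the scanner one Result at a time; a hedged sketch of that loop (HBase 1.x client API assumed):

// iterate the snapshot region client-side, then release resources
Result result;
while ((result = scanner.next()) != null) {
  // hand the row to the consumer, e.g. the map task
}
scanner.close();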

Example 3: testNamespaceJanitor

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
@Ignore @Test
public void testNamespaceJanitor() throws Exception {
  FileSystem fs = TEST_UTIL.getTestFileSystem();

  int fsCount = fs.listStatus(new Path(FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
      HConstants.BASE_NAMESPACE_DIR)).length;
  Path fakeNSPath =
      FSUtils.getNamespaceDir(FSUtils.getRootDir(TEST_UTIL.getConfiguration()), "foo");
  assertTrue(fs.mkdirs(fakeNSPath));

  String fakeZnode = ZKUtil.joinZNode(ZooKeeperWatcher.namespaceZNode, "foo");
  int zkCount = ZKUtil.listChildrenNoWatch(TEST_UTIL.getZooKeeperWatcher(),
      ZooKeeperWatcher.namespaceZNode).size();
  ZKUtil.createWithParents(TEST_UTIL.getZooKeeperWatcher(), fakeZnode);
  Thread.sleep(10000);

  //verify namespace count is the same and orphan is removed
  assertFalse(fs.exists(fakeNSPath));
  assertEquals(fsCount, fs.listStatus(new Path(FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
          HConstants.BASE_NAMESPACE_DIR)).length);

  assertEquals(-1, ZKUtil.checkExists(TEST_UTIL.getZooKeeperWatcher(), fakeZnode));
  assertEquals(zkCount,
      ZKUtil.listChildrenNoWatch(TEST_UTIL.getZooKeeperWatcher(),
          ZooKeeperWatcher.namespaceZNode).size());
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestNamespace.java

Example 4: doOfflineLogSplitting

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * Performs log splitting for all regionserver directories.
 * @throws Exception
 */
private void doOfflineLogSplitting() throws Exception {
  LOG.info("Starting Log splitting");
  final Path rootDir = FSUtils.getRootDir(getConf());
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  // since this is the singleton, we needn't close it.
  final WALFactory factory = WALFactory.getInstance(getConf());
  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
  Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
  FileStatus[] regionServerLogDirs = FSUtils.listStatus(fs, logDir);
  if (regionServerLogDirs == null || regionServerLogDirs.length == 0) {
    LOG.info("No log directories to split, returning");
    return;
  }
  try {
    for (FileStatus regionServerLogDir : regionServerLogDirs) {
      // split its log dir, if exists
      WALSplitter.split(rootDir, regionServerLogDir.getPath(), oldLogDir, fs, getConf(), factory);
    }
    LOG.info("Successfully completed Log splitting");
  } catch (Exception e) {
    LOG.error("Got exception while doing Log splitting ", e);
    throw e;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: UpgradeTo96.java
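
For context, this splitting pass belongs to HBase's one-time 0.96 upgrade tool; per the 0.96 upgrade guide it was typically run as bin/hbase upgrade -execute against a fully shut-down cluster.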

Example 5: cleanIfNoMetaEntry

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * This method does an RPC to hbase:meta. Do not call it while holding a lock or inside a synchronized block.
 * @param hris The hris to check if empty in hbase:meta and if so, clean them up.
 */
private void cleanIfNoMetaEntry(Set<HRegionInfo> hris) {
  if (hris.isEmpty()) return;
  for (HRegionInfo hri: hris) {
    try {
      // This is an RPC to the meta table. It is done while we are synchronized on
      // regionstates. No progress will be made if meta is not available at this time.
      // This is a cleanup task. Not critical.
      if (MetaTableAccessor.getRegion(server.getConnection(), hri.getEncodedNameAsBytes()) ==
          null) {
        regionOffline(hri);
        FSUtils.deleteRegionDir(server.getConfiguration(), hri);
      }
    } catch (IOException e) {
      LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: RegionStates.java

Example 6: cleanMergeRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * If the merged region no longer holds references to its parent regions,
 * archive the parent regions on HDFS and delete the merge references
 * from hbase:meta.
 * @param mergedRegion the merged region to check
 * @param regionA first parent of the merge
 * @param regionB second parent of the merge
 * @return true if the references in hbase:meta were deleted and the parent
 *         region files were archived on the file system
 * @throws IOException
 */
boolean cleanMergeRegion(final HRegionInfo mergedRegion,
    final HRegionInfo regionA, final HRegionInfo regionB) throws IOException {
  FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
  Path rootdir = this.services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
  HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
  HRegionFileSystem regionFs = null;
  try {
    regionFs = HRegionFileSystem.openRegionFromFileSystem(
        this.services.getConfiguration(), fs, tabledir, mergedRegion, true);
  } catch (IOException e) {
    LOG.warn("Merged region does not exist: " + mergedRegion.getEncodedName());
  }
  if (regionFs == null || !regionFs.hasReferences(htd)) {
    LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and "
        + regionB.getRegionNameAsString()
        + " from fs because merged region no longer holds references");
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
    HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
    MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
      mergedRegion);
    return true;
  }
  return false;
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: CatalogJanitor.java
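
For context, CatalogJanitor runs as a periodic background chore in the master, scanning hbase:meta for leftover split and merge artifacts; this method is one of the checks performed during that scan.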

Example 7: MasterFileSystem

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
public MasterFileSystem(Server master, MasterServices services)
throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs.  Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager =
      new SplitLogManager(master, master.getConfiguration(), master, services,
          master.getServerName());
  this.distributedLogReplay = this.splitLogManager.isLogReplaying();
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: MasterFileSystem.java

Example 8: createRegion

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
protected HRegionInfo createRegion(Configuration conf, final Table htbl,
    byte[] startKey, byte[] endKey) throws IOException {
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  HRegionInfo hri = new HRegionInfo(htbl.getName(), startKey, endKey);

  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()),
      hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(hri.toDelimitedByteArray());
  out.close();

  // add to meta.
  MetaTableAccessor.addRegionToMeta(meta, hri);
  meta.close();
  return hri;
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: OfflineMetaRebuildTestCore.java

Example 9: setUpBeforeClass

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start up the mini cluster on top of a 0.94 root.dir that has data from
  // a 0.94 hbase run and see if we can migrate to 0.96
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.startMiniDFSCluster(1);

  hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
  fs = FileSystem.get(TEST_UTIL.getConfiguration());
  FSUtils.setRootDir(TEST_UTIL.getConfiguration(), hbaseRootDir);
  zkw = TEST_UTIL.getZooKeeperWatcher();

  Path testdir = TEST_UTIL.getDataTestDir("TestUpgradeTo96");
  // lay down the untarred 0.94 file structure

  set94FSLayout(testdir);
  setUp94Znodes();
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestUpgradeTo96.java

Example 10: getFileList

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * Get a list of paths that need to be split given a set of server-specific directories and
 * optionally a filter.
 *
 * See {@link DefaultWALProvider#getServerNameFromWALDirectoryName} for more info on directory
 * layout.
 *
 * Should be package-private, but is needed by
 * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
 *     Configuration, WALFactory)} for tests.
 */
@VisibleForTesting
public static FileStatus[] getFileList(final Configuration conf, final List<Path> logDirs,
    final PathFilter filter)
    throws IOException {
  List<FileStatus> fileStatus = new ArrayList<FileStatus>();
  for (Path logDir : logDirs) {
    final FileSystem fs = logDir.getFileSystem(conf);
    if (!fs.exists(logDir)) {
      LOG.warn(logDir + " doesn't exist. Nothing to do!");
      continue;
    }
    FileStatus[] logfiles = FSUtils.listStatus(fs, logDir, filter);
    if (logfiles == null || logfiles.length == 0) {
      LOG.info(logDir + " is empty dir, no logs to split");
    } else {
      Collections.addAll(fileStatus, logfiles);
    }
  }
  FileStatus[] a = new FileStatus[fileStatus.size()];
  return fileStatus.toArray(a);
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: SplitLogManager.java
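
A hedged caller sketch (it assumes the surrounding class's imports; the regionserver directory names are hypothetical, and a null filter means "take every log file"):

// collect the per-regionserver WAL directories under the log root and
// ask for everything that still needs splitting
Configuration conf = HBaseConfiguration.create();
Path logRoot = new Path(FSUtils.getRootDir(conf), HConstants.HREGION_LOGDIR_NAME);
List<Path> serverDirs = Arrays.asList(
    new Path(logRoot, "rs1.example.com,16020,1476218953339"),   // hypothetical
    new Path(logRoot, "rs2.example.com,16020,1476218953340"));  // hypothetical
FileStatus[] toSplit = SplitLogManager.getFileList(conf, serverDirs, null);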

Example 11: init

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * @param factory factory that made us, identity used for FS layout. may not be null
 * @param conf may not be null
 * @param listeners may be null
 * @param providerId differentiate between providers from one factory, used for FS layout. may be
 *                   null
 */
@Override
public void init(final WALFactory factory, final Configuration conf,
    final List<WALActionsListener> listeners, String providerId) throws IOException {
  if (null != log) {
    throw new IllegalStateException("WALProvider.init should only be called once.");
  }
  if (null == providerId) {
    providerId = DEFAULT_PROVIDER_ID;
  }
  final String logPrefix = factory.factoryId + WAL_FILE_NAME_DELIMITER + providerId;
  log = new IOTestWAL(FileSystem.get(conf), FSUtils.getRootDir(conf),
      DefaultWALProvider.getWALDirectoryName(factory.factoryId),
      HConstants.HREGION_OLDLOGDIR_NAME, conf, listeners,
      true, logPrefix, META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: IOTestProvider.java

Example 12: setConf

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * This method should only be called <b>once</b>, as it starts a thread to keep the cache
 * up-to-date.
 * <p>
 * {@inheritDoc}
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  try {
    long cacheRefreshPeriod = conf.getLong(
      WAL_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_WAL_CACHE_REFRESH_PERIOD);
    final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
        "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
          public Collection<String> filesUnderSnapshot(final Path snapshotDir)
              throws IOException {
            return SnapshotReferenceUtil.getWALNames(fs, snapshotDir);
          }
        });
  } catch (IOException e) {
    LOG.error("Failed to create snapshot log cleaner", e);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: SnapshotLogCleaner.java
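
For context, cleaner delegates like this one are plugged into the master's log-cleaning chore through the hbase.master.logcleaner.plugins configuration key.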

Example 13: createReferences

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * @param services Master services instance.
 * @param htd
 * @param parent
 * @param daughter
 * @param midkey
 * @param top True if we are to write a 'top' reference.
 * @return Path to reference we created.
 * @throws IOException
 */
private Path createReferences(final MasterServices services,
    final HTableDescriptor htd, final HRegionInfo parent,
    final HRegionInfo daughter, final byte [] midkey, final boolean top)
throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter,
    htd.getColumnFamilies()[0].getName());
  Reference ref =
    top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestCatalogJanitor.java

Example 14: getHFileFromBackReference

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
/**
 * Get the full path of the HFile referenced by the back reference
 *
 * @param rootDir root hbase directory
 * @param linkRefPath Link Back Reference path
 * @return full path of the referenced hfile
 */
public static Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath) {
  Pair<TableName, String> p = parseBackReferenceName(linkRefPath.getName());
  TableName linkTableName = p.getFirst();
  String linkRegionName = p.getSecond();

  String hfileName = getBackReferenceFileName(linkRefPath.getParent());
  Path familyPath = linkRefPath.getParent().getParent();
  Path regionPath = familyPath.getParent();
  Path tablePath = regionPath.getParent();

  String linkName = createHFileLinkName(FSUtils.getTableName(tablePath),
          regionPath.getName(), hfileName);
  Path linkTableDir = FSUtils.getTableDir(rootDir, linkTableName);
  Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
  return new Path(new Path(regionDir, familyPath.getName()), linkName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: HFileLink.java

Example 15: getSplits

import org.apache.hadoop.hbase.util.FSUtils; // import the required package/class
public static List<InputSplit> getSplits(Configuration conf) throws IOException {
  String snapshotName = getSnapshotName(conf);

  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);

  SnapshotManifest manifest = getSnapshotManifest(conf, snapshotName, rootDir, fs);

  List<HRegionInfo> regionInfos = getRegionInfosFromManifest(manifest);

  // TODO: mapred does not support scan as input API. Work around for now.
  Scan scan = extractScanFromConf(conf);
  // the temp dir where the snapshot is restored
  Path restoreDir = new Path(conf.get(RESTORE_DIR_KEY));

  return getSplits(scan, manifest, regionInfos, restoreDir, conf);
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: TableSnapshotInputFormatImpl.java
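
In practice the snapshot name and restore directory keys read above are populated by the job-setup helper rather than by hand; a hedged sketch of that wiring (HBase 1.x mapreduce API; the snapshot name, mapper class, and restore path are hypothetical):

// configure a MapReduce job to scan a snapshot; this sets the snapshot
// name and RESTORE_DIR_KEY that getSplits later reads
Job job = Job.getInstance(HBaseConfiguration.create(), "snapshot-scan");
TableMapReduceUtil.initTableSnapshotMapperJob(
    "my_snapshot",                       // hypothetical snapshot name
    new Scan(),
    MyMapper.class,                      // hypothetical TableMapper subclass
    Text.class, Text.class,              // job output key/value classes
    job,
    true,                                // addDependencyJars
    new Path("/tmp/snapshot-restore"));  // hypothetical restore directory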


Note: The org.apache.hadoop.hbase.util.FSUtils class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.