This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.FSUtils.getCurrentFileSystem. If you are wondering what FSUtils.getCurrentFileSystem does, how to call it, or what it looks like in real code, the curated examples below should help. For more context, see the enclosing class, org.apache.hadoop.hbase.util.FSUtils.
Six code examples of FSUtils.getCurrentFileSystem are shown below, ordered by popularity.
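Before the examples, a minimal standalone sketch may help. getCurrentFileSystem(Configuration) resolves the FileSystem that backs the configured HBase root directory (hbase.rootdir). The class name below is made up for illustration; the sketch assumes an HBase 1.x classpath where FSUtils still exposes this static method.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.FSUtils;

public class CurrentFileSystemDemo {
  public static void main(String[] args) throws Exception {
    // Loads hbase-default.xml and hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // Resolve the FileSystem backing hbase.rootdir (HDFS in a typical deployment).
    FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    System.out.println("HBase root dir " + rootDir + " lives on " + fs.getUri());
  }
}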
Example 1: initialize
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);
  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
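What happens next is not shown in the original class, but as a sketch of typical usage (an assumption, not the actual record reader code): the ClientSideRegionScanner built above is drained with next() until it returns null, then closed.

// Illustrative continuation: consuming the scanner built in initialize().
Result result;
while ((result = scanner.next()) != null) {
  for (Cell cell : result.rawCells()) {
    // process each Cell of the snapshot region restored under restoreDir
  }
}
scanner.close(); // releases the temporary region and its store files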
Example 2: doOfflineLogSplitting
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Performs log splitting for all regionserver directories.
 * @throws Exception if splitting fails for any regionserver log directory
 */
private void doOfflineLogSplitting() throws Exception {
  LOG.info("Starting Log splitting");
  final Path rootDir = FSUtils.getRootDir(getConf());
  final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
  // since this is the singleton, we needn't close it.
  final WALFactory factory = WALFactory.getInstance(getConf());
  FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
  Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
  FileStatus[] regionServerLogDirs = FSUtils.listStatus(fs, logDir);
  if (regionServerLogDirs == null || regionServerLogDirs.length == 0) {
    LOG.info("No log directories to split, returning");
    return;
  }
  try {
    for (FileStatus regionServerLogDir : regionServerLogDirs) {
      // split its log dir, if exists
      WALSplitter.split(rootDir, regionServerLogDir.getPath(), oldLogDir, fs, getConf(), factory);
    }
    LOG.info("Successfully completed Log splitting");
  } catch (Exception e) {
    LOG.error("Got exception while doing Log splitting ", e);
    throw e;
  }
}
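A design note on this example: the splitting runs offline, with no live cluster, so the tool can read each regionserver's WAL directory straight from the FileSystem returned by FSUtils.getCurrentFileSystem. WALSplitter.split replays the edits in each directory into per-region recovered-edits files and archives the processed WALs under the old-log directory, which is why the method only needs the root, log, and old-log paths plus a WALFactory.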
Example 3: setConf
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * This method should only be called <b>once</b>, as it starts a thread to keep the cache
 * up-to-date.
 * <p>
 * {@inheritDoc}
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  try {
    long cacheRefreshPeriod = conf.getLong(
        WAL_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_WAL_CACHE_REFRESH_PERIOD);
    final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
        "snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
          @Override
          public Collection<String> filesUnderSnapshot(final Path snapshotDir)
              throws IOException {
            return SnapshotReferenceUtil.getWALNames(fs, snapshotDir);
          }
        });
  } catch (IOException e) {
    LOG.error("Failed to create snapshot log cleaner", e);
  }
}
Example 4: setConf
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
@Override
public void setConf(final Configuration conf) {
  super.setConf(conf);
  try {
    long cacheRefreshPeriod = conf.getLong(HFILE_CACHE_REFRESH_PERIOD_CONF_KEY,
        DEFAULT_HFILE_CACHE_REFRESH_PERIOD);
    final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
    Path rootDir = FSUtils.getRootDir(conf);
    cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
        "snapshot-hfile-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
          @Override
          public Collection<String> filesUnderSnapshot(final Path snapshotDir)
              throws IOException {
            return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
          }
        });
  } catch (IOException e) {
    LOG.error("Failed to create cleaner util", e);
  }
}
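Examples 3 and 4 are intentionally near-identical: each wires a SnapshotFileCache into a cleaner delegate on the filesystem returned by FSUtils.getCurrentFileSystem, and they differ only in the refresh-period configuration key they read and in which files the SnapshotFileInspector extracts per snapshot (WAL names via SnapshotReferenceUtil.getWALNames versus HFile names via SnapshotReferenceUtil.getHFileNames).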
Example 5: recoverLease
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
private void recoverLease(final Configuration conf, final Path path) {
  try {
    final FileSystem dfs = FSUtils.getCurrentFileSystem(conf);
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    fsUtils.recoverFileLease(dfs, path, conf, new CancelableProgressable() {
      @Override
      public boolean progress() {
        LOG.debug("recover WAL lease: " + path);
        return isWorkerActive();
      }
    });
  } catch (IOException e) {
    LOG.warn("unable to recover lease for WAL: " + path, e);
  }
}
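The CancelableProgressable above doubles as a cancellation hook: HDFS lease recovery retries until the lease is granted, and returning false from progress() aborts the wait once the worker goes inactive. Below is a minimal sketch of the same call without cancellation; the WAL path is purely illustrative, and the always-true reporter is one assumed way to opt out of cancelling.

// Sketch, reusing the FSUtils APIs from the example above
// (recoverFileLease throws IOException; wrap in try/catch as shown there).
Configuration conf = HBaseConfiguration.create();
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
Path wal = new Path("/hbase/WALs/host,16020,1/example.wal"); // illustrative path
FSUtils.getInstance(fs, conf).recoverFileLease(fs, wal, conf,
    new CancelableProgressable() {
      @Override
      public boolean progress() {
        return true; // never cancel; keep waiting for the lease
      }
    });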
Example 6: SnapshotFileCache
import org.apache.hadoop.hbase.util.FSUtils; // import the package/class the method depends on
/**
 * Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
 * filesystem.
 * <p>
 * Immediately loads the file cache.
 * @param conf to extract the configured {@link FileSystem} where the snapshots are stored and
 *          hbase root directory
 * @param cacheRefreshPeriod frequency (ms) with which the cache should be refreshed
 * @param refreshThreadName name of the cache refresh thread
 * @param inspectSnapshotFiles Filter to apply to each snapshot to extract the files.
 * @throws IOException if the {@link FileSystem} or root directory cannot be loaded
 */
public SnapshotFileCache(Configuration conf, long cacheRefreshPeriod, String refreshThreadName,
    SnapshotFileInspector inspectSnapshotFiles) throws IOException {
  this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf), 0, cacheRefreshPeriod,
      refreshThreadName, inspectSnapshotFiles);
}
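A hedged usage sketch of this convenience constructor, mirroring Example 4's inspector; the five-minute refresh period and the thread name are illustrative choices, not values from the source, and the fragment is assumed to run inside a method that declares IOException.

final Configuration conf = HBaseConfiguration.create();
final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
SnapshotFileCache cache = new SnapshotFileCache(conf,
    5 * 60 * 1000L,             // cacheRefreshPeriod in ms (illustrative)
    "snapshot-cache-refresher", // illustrative thread name
    new SnapshotFileCache.SnapshotFileInspector() {
      @Override
      public Collection<String> filesUnderSnapshot(final Path snapshotDir) throws IOException {
        return SnapshotReferenceUtil.getHFileNames(conf, fs, snapshotDir);
      }
    });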