This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.hbase.fs.HFileSystem. If you are wondering what HFileSystem is used for or how to use it, the curated code examples below may help.
The HFileSystem class belongs to the org.apache.hadoop.hbase.fs package. A total of 15 code examples of the HFileSystem class are shown below, sorted by popularity by default.
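Most of the examples below obtain the filesystem through the static factory HFileSystem.get(conf), which wraps the configured default FileSystem and layers HBase-level checksum handling on top of it. The following is a minimal, self-contained sketch of that pattern; it is not taken from the examples and only assumes the standard HBase/Hadoop client classes shown in its imports.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.fs.HFileSystem;

public class HFileSystemQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // HFileSystem.get(conf) returns a FileSystem that is backed by an HFileSystem wrapper.
    FileSystem fs = HFileSystem.get(conf);
    if (fs instanceof HFileSystem) {
      HFileSystem hfs = (HFileSystem) fs;
      System.out.println("useHBaseChecksum = " + hfs.useHBaseChecksum());
      System.out.println("backing fs       = " + hfs.getBackingFs().getUri());
      System.out.println("no-checksum fs   = " + hfs.getNoChecksumFs().getUri());
    }
  }
}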
Example 1: MasterFileSystem
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
public MasterFileSystem(Server master, MasterServices services)
    throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs. Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager =
      new SplitLogManager(master, master.getConfiguration(), master, services,
          master.getServerName());
  this.distributedLogReplay = this.splitLogManager.isLogReplaying();
}
Example 2: bulkLoadStoreFile
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
/**
 * Bulk load: Add a specified store file to the specified family. If the source file is on the
 * same file-system it is moved from the source location to the destination location; otherwise
 * it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum) throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;
  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }
  return commitStoreFile(familyName, srcPath, seqNum, true);
}
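The destination-filesystem unwrap in the method above matters because HFileSystem is only a wrapper: comparing it directly against the source filesystem would always look like a mismatch, since FileSystem equality also folds in the UGI. A minimal sketch of just that step, written as a hypothetical helper:

// Minimal sketch (hypothetical helper name): resolve the filesystem that actually
// backs an HFileSystem wrapper before comparing it with a source filesystem.
static FileSystem unwrapBackingFs(FileSystem fs) {
  return fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;
}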
Example 3: pickReaderVersion
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
/**
 * Method returns the reader given the specified arguments.
 * TODO This is a bad abstraction. See HBASE-6635.
 *
 * @param path hfile's path
 * @param fsdis stream of path's file
 * @param size max size of the trailer.
 * @param cacheConf Cache configuration values, cannot be null.
 * @param hfs
 * @return an appropriate instance of HFileReader
 * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SF_SWITCH_FALLTHROUGH", justification = "Intentional")
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis, long size,
    CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
  FixedFileTrailer trailer = null;
  try {
    boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
    assert !isHBaseChecksum; // Initially we must read with FS checksum.
    trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
    switch (trailer.getMajorVersion()) {
      case 2:
        return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf);
      case 3:
        return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf);
      default:
        throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
    }
  } catch (Throwable t) {
    try {
      fsdis.close();
    } catch (Throwable t2) {
      LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
    }
    throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
  }
}
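pickReaderVersion is private; callers normally go through HFile's public factory methods, which read the fixed trailer and dispatch to the matching reader version internally. A rough caller-side sketch follows; the file path is hypothetical and the setup assumes a working Hadoop configuration.

// Minimal sketch (assumed path and setup): open an HFile through the public factory,
// which internally performs the version dispatch shown above.
Configuration conf = HBaseConfiguration.create();
FileSystem fs = HFileSystem.get(conf);
Path hfilePath = new Path("/tmp/example.hfile"); // hypothetical file
HFile.Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf), conf);
try {
  System.out.println("HFile major version: " + reader.getTrailer().getMajorVersion());
} finally {
  reader.close();
}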
Example 4: testNewBlocksHaveDefaultChecksum
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
@Test
public void testNewBlocksHaveDefaultChecksum() throws IOException {
  Path path = new Path(TEST_UTIL.getDataTestDir(), "default_checksum");
  FSDataOutputStream os = fs.create(path);
  HFileContext meta = new HFileContextBuilder().build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(BlockType.DATA);
  for (int i = 0; i < 1000; ++i)
    dos.writeInt(i);
  hbw.writeHeaderAndData(os);
  int totalSize = hbw.getOnDiskSizeWithHeader();
  os.close();
  // Use hbase checksums.
  assertEquals(true, hfs.useHBaseChecksum());
  FSDataInputStreamWrapper is = new FSDataInputStreamWrapper(fs, path);
  meta = new HFileContextBuilder().withHBaseCheckSum(true).build();
  HFileBlock.FSReader hbr = new HFileBlock.FSReaderImpl(
      is, totalSize, (HFileSystem) fs, path, meta);
  HFileBlock b = hbr.readBlockData(0, -1, -1, false);
  assertEquals(b.getChecksumType(), ChecksumType.getDefaultChecksumType().getCode());
}
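The hfs.useHBaseChecksum() assertion in this test reflects how the wrapped filesystem was configured. As a rough sketch (assuming the controlling key is hbase.regionserver.checksum.verify, exposed as HConstants.HBASE_CHECKSUM_VERIFICATION), HBase-level checksums can be toggled before the wrapper is created:

// Minimal sketch: enable or disable HBase-level checksums before creating the
// HFileSystem wrapper; useHBaseChecksum() then reflects this setting.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
FileSystem wrapped = HFileSystem.get(conf);
boolean hbaseChecksums = ((HFileSystem) wrapped).useHBaseChecksum(); // true here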
Example 5: setUp
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
          cowType.shouldBeCached(BlockType.LEAF_INDEX),
          cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData,
          false, false, false);
}
Example 6: run
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
@Override
public int run(String[] args) throws Exception {
  if (args.length < 1) {
    System.err.println("Usage: Clean <output dir>");
    return -1;
  }
  Path p = new Path(args[0]);
  Configuration conf = getConf();
  TableName tableName = getTableName(conf);
  try (FileSystem fs = HFileSystem.get(conf);
      Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
    if (admin.tableExists(tableName)) {
      admin.disableTable(tableName);
      admin.deleteTable(tableName);
    }
    if (fs.exists(p)) {
      fs.delete(p, true);
    }
  }
  return 0;
}
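This run method belongs to a Hadoop Tool implementation; the usage string suggests the class is called Clean. A hypothetical driver sketch showing how such a tool is typically launched with ToolRunner:

// Hypothetical driver (class name taken from the usage string above); assumes the
// tool extends Configured and implements Tool so getConf() works as shown.
public static void main(String[] args) throws Exception {
  int exitCode = ToolRunner.run(HBaseConfiguration.create(), new Clean(), args);
  System.exit(exitCode);
}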
Example 7: Reader
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size, CacheConfig cacheConf,
    DataBlockEncoding preferredEncodingInCache, boolean closeIStream) throws IOException {
  super(path);
  pWinterPath = path;
  FSDataInputStream in = hfileLink.open(fs);
  FSDataInputStream inNoChecksum = in;
  if (fs instanceof HFileSystem) {
    FileSystem noChecksumFs = ((HFileSystem) fs).getNoChecksumFs();
    inNoChecksum = hfileLink.open(noChecksumFs);
  }
  reader = HFile.createReaderWithEncoding(fs, path, in, inNoChecksum, size, cacheConf,
      preferredEncodingInCache, closeIStream);
  bloomFilterType = BloomType.NONE;
}
Example 8: AbstractHFileReader
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
protected AbstractHFileReader(Path path, FixedFileTrailer trailer,
    final FSDataInputStream fsdis, final FSDataInputStream fsdisNoFsChecksum,
    final long fileSize, final boolean closeIStream,
    final CacheConfig cacheConf, final HFileSystem hfs) {
  super(null, path);
  this.trailer = trailer;
  this.compressAlgo = trailer.getCompressionCodec();
  this.cacheConf = cacheConf;
  this.fileSize = fileSize;
  this.istream = fsdis;
  this.closeIStream = closeIStream;
  this.path = path;
  this.name = path.getName();
  this.hfs = hfs;
  this.istreamNoFsChecksum = fsdisNoFsChecksum;
}
Example 9: FSReaderV2
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
public FSReaderV2(FSDataInputStream istream, FSDataInputStream istreamNoFsChecksum,
    Algorithm compressAlgo, long fileSize, int minorVersion, HFileSystem hfs, Path path)
    throws IOException {
  super(istream, istreamNoFsChecksum, compressAlgo, fileSize, minorVersion, hfs, path);
  if (hfs != null) {
    // Check the configuration to determine whether hbase-level
    // checksum verification is needed or not.
    useHBaseChecksum = hfs.useHBaseChecksum();
  } else {
    // The configuration does not specify anything about hbase checksum
    // validations. Set it to true here assuming that we will verify
    // hbase checksums for all reads. For older files that do not have
    // stored checksums, this flag will be reset later.
    useHBaseChecksum = true;
  }
  // for older versions, hbase did not store checksums.
  if (getMinorVersion() < MINOR_VERSION_WITH_CHECKSUM) {
    useHBaseChecksum = false;
  }
  this.useHBaseChecksumConfigured = useHBaseChecksum;
}
Example 10: setUp
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}
Example 11: bulkLoadStoreFile
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
/**
 * Bulk load: Add a specified store file to the specified family.
 * If the source file is on the same file-system it is moved from the
 * source location to the destination location; otherwise it is copied over.
 *
 * @param familyName Family that will gain the file
 * @param srcPath {@link Path} to the file to import
 * @param seqNum Bulk Load sequence number
 * @return The destination {@link Path} of the bulk loaded file
 * @throws IOException
 */
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
    throws IOException {
  // Copy the file if it's on another filesystem
  FileSystem srcFs = srcPath.getFileSystem(conf);
  FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem) fs).getBackingFs() : fs;
  // We can't compare FileSystem instances as equals() includes UGI instance
  // as part of the comparison and won't work when doing SecureBulkLoad
  // TODO deal with viewFS
  if (!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)) {
    LOG.info("Bulk-load file " + srcPath + " is on different filesystem than "
        + "the destination store. Copying file over to destination filesystem.");
    Path tmpPath = createTempName();
    FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
    LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
    srcPath = tmpPath;
  }
  return commitStoreFile(familyName, srcPath, seqNum, true);
}
Example 12: pickReaderVersion
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
/**
 * Method returns the reader given the specified arguments.
 * TODO This is a bad abstraction. See HBASE-6635.
 *
 * @param path hfile's path
 * @param fsdis stream of path's file
 * @param size max size of the trailer.
 * @param cacheConf Cache configuration values, cannot be null.
 * @param hfs
 * @return an appropriate instance of HFileReader
 * @throws IOException If file is invalid, will throw CorruptHFileException flavored IOException
 */
private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis,
    long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException {
  FixedFileTrailer trailer = null;
  try {
    boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum();
    assert !isHBaseChecksum; // Initially we must read with FS checksum.
    trailer = FixedFileTrailer.readFromStream(fsdis.getStream(isHBaseChecksum), size);
    switch (trailer.getMajorVersion()) {
      case 2:
        return new HFileReaderV2(path, trailer, fsdis, size, cacheConf, hfs, conf);
      case 3:
        return new HFileReaderV3(path, trailer, fsdis, size, cacheConf, hfs, conf);
      default:
        throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion());
    }
  } catch (Throwable t) {
    try {
      fsdis.close();
    } catch (Throwable t2) {
      LOG.warn("Error closing fsdis FSDataInputStreamWrapper", t2);
    }
    throw new CorruptHFileException("Problem reading HFile Trailer from file " + path, t);
  }
}
Example 13: setUp
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf =
      new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
          cowType.shouldBeCached(BlockType.LEAF_INDEX),
          cowType.shouldBeCached(BlockType.BLOOM_CHUNK), false, cacheCompressedData, true, false);
}
Example 14: MasterFileSystem
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
public MasterFileSystem(Server master, MasterServices services, boolean masterRecovery)
    throws IOException {
  this.conf = master.getConfiguration();
  this.master = master;
  this.services = services;
  // Set filesystem to be that of this.rootdir else we get complaints about
  // mismatched filesystems if hbase.rootdir is hdfs and fs.defaultFS is
  // default localfs. Presumption is that rootdir is fully-qualified before
  // we get to here with appropriate fs scheme.
  this.rootdir = FSUtils.getRootDir(conf);
  this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  this.fs = this.rootdir.getFileSystem(conf);
  FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
  // make sure the fs has the same conf
  fs.setConf(conf);
  this.distributedLogReplay = HLogSplitter.isDistributedLogReplay(this.conf);
  // setup the filesystem variable
  // set up the archived logs path
  this.oldLogDir = createInitialFileSystemLayout();
  HFileSystem.addLocationsOrderInterceptor(conf);
  this.splitLogManager = new SplitLogManager(master.getZooKeeper(),
      master.getConfiguration(), master, services,
      master.getServerName(), masterRecovery);
}
Example 15: setUp
import org.apache.hadoop.hbase.fs.HFileSystem; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = TEST_UTIL.getConfiguration();
  this.conf.set("dfs.datanode.data.dir.perm", "700");
  conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);
  conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, INDEX_BLOCK_SIZE);
  conf.setInt(BloomFilterFactory.IO_STOREFILE_BLOOM_BLOCK_SIZE, BLOOM_BLOCK_SIZE);
  conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.DATA));
  conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.LEAF_INDEX));
  conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
      cowType.shouldBeCached(BlockType.BLOOM_CHUNK));
  cowType.modifyConf(conf);
  fs = HFileSystem.get(conf);
  cacheConf = new CacheConfig(conf);
  blockCache = cacheConf.getBlockCache();
}