This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.FileLink.open. If you are wondering what FileLink.open does, how to use it, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.io.FileLink.
Three code examples of the FileLink.open method are shown below, sorted by popularity by default.
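All three examples share the same basic pattern: a FileLink is constructed from a list of candidate locations, and open(FileSystem) returns an FSDataInputStream that transparently retries the remaining locations if the current file disappears. Here is a minimal sketch of that pattern; the method name and the paths are placeholders for illustration, not taken from the examples below.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.FileLink;

// Minimal sketch: build a FileLink from candidate locations, then open it.
// Both paths below are placeholders for illustration only.
public static FSDataInputStream openLinkedFile(FileSystem fs) throws IOException {
  Path current  = new Path("/hbase/data/default/t1/r1/cf/storefile");          // placeholder
  Path archived = new Path("/hbase/archive/data/default/t1/r1/cf/storefile");  // placeholder
  FileLink link = new FileLink(Arrays.asList(current, archived));
  // getFileStatus() resolves whichever candidate currently exists.
  System.out.println("length = " + link.getFileStatus(fs).getLen());
  // The returned stream fails over to the next location if the current one disappears.
  return link.open(fs);
}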
Example 1: processRegion
import org.apache.hadoop.hbase.io.FileLink; // import the package/class this method depends on
/**
* Each region is processed by a separate handler. If an HRegion has an hfileV1, its path is
* returned as the future result; otherwise, a null value is returned.
* @param regionDir Region to process.
* @return corresponding Future object.
*/
private Future<Path> processRegion(final Path regionDir) {
LOG.debug("processing region: " + regionDir);
Callable<Path> regionCallable = new Callable<Path>() {
@Override
public Path call() throws Exception {
for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) {
FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
if (storeFiles == null || storeFiles.length == 0) continue;
for (FileStatus storeFile : storeFiles) {
Path storeFilePath = storeFile.getPath();
FSDataInputStream fsdis = null;
long lenToRead = 0;
try {
// check whether this path is a reference.
if (StoreFileInfo.isReference(storeFilePath)) continue;
// check whether this path is a HFileLink.
else if (HFileLink.isHFileLink(storeFilePath)) {
FileLink fLink = getFileLinkWithPreNSPath(storeFilePath);
fsdis = fLink.open(fs);
lenToRead = fLink.getFileStatus(fs).getLen();
} else {
// a regular hfile
fsdis = fs.open(storeFilePath);
lenToRead = storeFile.getLen();
}
int majorVersion = computeMajorVersion(fsdis, lenToRead);
if (majorVersion == 1) {
hFileV1Set.add(storeFilePath);
// return this region path, as it needs to be compacted.
return regionDir;
}
if (majorVersion > 2 || majorVersion < 1) throw new IllegalArgumentException(
"Incorrect major version: " + majorVersion);
} catch (Exception iae) {
corruptedHFiles.add(storeFilePath);
LOG.error("Got exception while reading trailer for file: "+ storeFilePath, iae);
} finally {
if (fsdis != null) fsdis.close();
}
}
}
return null;
}
private int computeMajorVersion(FSDataInputStream istream, long fileSize)
throws IOException {
// Read the last int of the file; the major version is in its last 3 bytes.
long seekPoint = fileSize - Bytes.SIZEOF_INT;
if (seekPoint < 0)
throw new IllegalArgumentException("File too small, no major version found");
// Read the version from the last int of the file.
istream.seek(seekPoint);
int version = istream.readInt();
// Extract and return the major version
return version & 0x00ffffff;
}
};
Future<Path> f = exec.submit(regionCallable);
return f;
}
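The computeMajorVersion helper above reads the last four bytes of the file and masks off the low three bytes to obtain the major version. Below is a small self-contained illustration of that decoding; treating the top byte as a minor version is an assumption about HFile v2+ trailers and is not shown in the example above.

// Decoding sketch for the trailer version int. The major-version mask mirrors
// computeMajorVersion() above; reading the top byte as the minor version is an
// assumption, not part of the original example.
public final class TrailerVersionDemo {
  static int majorVersion(int rawVersion) {
    return rawVersion & 0x00ffffff;   // low 3 bytes, same mask as the example
  }

  static int minorVersion(int rawVersion) {
    return rawVersion >>> 24;         // assumed: high byte carries the minor version
  }

  public static void main(String[] args) {
    int raw = (3 << 24) | 2;               // hypothetical trailer int: minor=3, major=2
    System.out.println(majorVersion(raw)); // prints 2
    System.out.println(minorVersion(raw)); // prints 3
  }
}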
Example 2: testLinkReadDuringRename
import org.apache.hadoop.hbase.io.FileLink; // import the package/class this method depends on
/**
* Test that the link is still readable even when the current file gets renamed.
*/
private void testLinkReadDuringRename(FileSystem fs, Path rootDir) throws IOException {
Path originalPath = new Path(rootDir, "test.file");
Path archivedPath = new Path(rootDir, "archived.file");
writeSomeData(fs, originalPath, 256 << 20, (byte)2);
List<Path> files = new ArrayList<Path>();
files.add(originalPath);
files.add(archivedPath);
FileLink link = new FileLink(files);
FSDataInputStream in = link.open(fs);
try {
byte[] data = new byte[8192];
long size = 0;
// Read from origin
int n = in.read(data);
dataVerify(data, n, (byte)2);
size += n;
// Move origin to archive
assertFalse(fs.exists(archivedPath));
fs.rename(originalPath, archivedPath);
assertFalse(fs.exists(originalPath));
assertTrue(fs.exists(archivedPath));
// Try to read to the end
while ((n = in.read(data)) > 0) {
dataVerify(data, n, (byte)2);
size += n;
}
assertEquals(256 << 20, size);
} finally {
in.close();
if (fs.exists(originalPath)) fs.delete(originalPath, true);
if (fs.exists(archivedPath)) fs.delete(archivedPath, true);
}
}
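The test calls two helpers that are not shown in this excerpt, writeSomeData and dataVerify. Their real implementations are not part of this example; a plausible sketch inferred from the call sites (fill a file of the given size with a single byte value, then check that a read buffer contains only that value) could look like this.

// Hedged sketch only: signatures are inferred from the call sites above,
// not copied from the original test class.
// Requires: java.io.IOException, java.util.Arrays,
// org.apache.hadoop.fs.FSDataOutputStream, and org.junit.Assert.assertEquals.
private static void writeSomeData(FileSystem fs, Path path, long size, byte v)
    throws IOException {
  byte[] chunk = new byte[4096];
  Arrays.fill(chunk, v);
  try (FSDataOutputStream out = fs.create(path)) {
    long remaining = size;
    while (remaining > 0) {
      int toWrite = (int) Math.min(chunk.length, remaining);
      out.write(chunk, 0, toWrite);
      remaining -= toWrite;
    }
  }
}

private static void dataVerify(byte[] data, int n, byte expected) {
  for (int i = 0; i < n; i++) {
    assertEquals(expected, data[i]);
  }
}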
Example 3: testHDFSLinkReadDuringDelete
import org.apache.hadoop.hbase.io.FileLink; // import the package/class this method depends on
/**
* Test that the link is still readable even when the current file gets deleted.
*
* NOTE: This test is valid only on HDFS.
* When a file is deleted from a local file-system, it is simply 'unlinked'.
* The inode, which contains the file's data, is not deleted until all
* processes have finished with it.
* On HDFS, when a read goes beyond the cached block locations,
* a query to the NameNode is performed using the filename,
* and the deleted file no longer exists (FileNotFoundException).
*/
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
HBaseTestingUtility testUtil = new HBaseTestingUtility();
Configuration conf = testUtil.getConfiguration();
conf.setInt("dfs.blocksize", 1024 * 1024);
conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);
testUtil.startMiniDFSCluster(1);
MiniDFSCluster cluster = testUtil.getDFSCluster();
FileSystem fs = cluster.getFileSystem();
assertEquals("hdfs", fs.getUri().getScheme());
try {
List<Path> files = new ArrayList<Path>();
for (int i = 0; i < 3; i++) {
Path path = new Path(String.format("test-data-%d", i));
writeSomeData(fs, path, 1 << 20, (byte)i);
files.add(path);
}
FileLink link = new FileLink(files);
FSDataInputStream in = link.open(fs);
try {
byte[] data = new byte[8192];
int n;
// Switch to file 1
n = in.read(data);
dataVerify(data, n, (byte)0);
fs.delete(files.get(0), true);
skipBuffer(in, (byte)0);
// Switch to file 2
n = in.read(data);
dataVerify(data, n, (byte)1);
fs.delete(files.get(1), true);
skipBuffer(in, (byte)1);
// Switch to file 3
n = in.read(data);
dataVerify(data, n, (byte)2);
fs.delete(files.get(2), true);
skipBuffer(in, (byte)2);
// No more files available
try {
n = in.read(data);
assert(n <= 0);
} catch (FileNotFoundException e) {
assertTrue(true);
}
} finally {
in.close();
}
} finally {
testUtil.shutdownMiniCluster();
}
}
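skipBuffer is another helper omitted from this excerpt. Judging from how it is used here (drain what remains of the current underlying file so that the next read has to fail over to the following one), a plausible sketch might be the following.

// Hedged sketch inferred from usage, not taken from the original test class:
// keep reading full buffers of the expected byte value; stop at end-of-file,
// when the content changes, or when the underlying file is already gone.
private static void skipBuffer(FSDataInputStream in, byte expected) {
  byte[] data = new byte[8192];
  try {
    int n;
    while ((n = in.read(data)) == data.length) {
      for (int i = 0; i < n; i++) {
        if (data[i] != expected) {
          return;
        }
      }
    }
  } catch (IOException e) {
    // Reading past the deleted file may throw; the caller's next read
    // exercises the FileLink fail-over to the following location.
  }
}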