This article collects typical usage examples of the Java method org.apache.hadoop.hbase.io.HFileLink.createHFileLinkName. If you are looking for how to call HFileLink.createHFileLinkName, or for concrete examples of its use, the curated code samples below should help. You can also browse further usage examples of its enclosing class, org.apache.hadoop.hbase.io.HFileLink.
The following shows 15 code examples of the HFileLink.createHFileLinkName method, sorted by popularity.
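Before the examples, here is a minimal sketch of what createHFileLinkName produces. It assumes an HBase version whose API matches the samples below (Examples 1 and 8 use an overload that takes the table name as a String; others pass an HRegionInfo or TableName), and the table, region and hfile values are hypothetical placeholders. As the restoreReferenceFile javadoc further down illustrates (e.g. wxyz/table=1234-abc), the generated name encodes table, region and store file name in a single link file name.

import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkNameSketch {
  public static void main(String[] args) {
    // Hypothetical placeholder values: a table name, an encoded region name
    // and a store file name (encoded names and hfile names are hex strings).
    String table = "testtb";
    String region = "abc123def4567890abc123def4567890";
    String hfile = "123456789abcdef0123456789abcdef0";

    // Build the link file name; per the pattern shown in the restoreReferenceFile
    // javadoc below, it should look like "testtb=abc123...-123456...".
    String linkName = HFileLink.createHFileLinkName(table, region, hfile);
    System.out.println(linkName);

    // A name built this way should be recognized as an hfile link.
    System.out.println(HFileLink.isHFileLink(linkName));
  }
}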
Example 1: addStoreFile
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* Add the specified store file to the stats
* @param region region encoded Name
* @param family family name
* @param hfile store file name
* @return the store file information
*/
FileInfo addStoreFile(final String region, final String family, final String hfile)
throws IOException {
String table = this.snapshot.getTable();
Path path = new Path(family, HFileLink.createHFileLinkName(table, region, hfile));
HFileLink link = new HFileLink(conf, path);
boolean inArchive = false;
long size = -1;
try {
if ((inArchive = fs.exists(link.getArchivePath()))) {
size = fs.getFileStatus(link.getArchivePath()).getLen();
hfileArchiveSize += size;
hfileArchiveCount++;
} else {
size = link.getFileStatus(fs).getLen();
hfileSize += size;
hfilesCount++;
}
} catch (FileNotFoundException e) {
hfilesMissing++;
}
return new FileInfo(inArchive, size);
}
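Example 1 builds a link path with createHFileLinkName and then checks whether the underlying file lives in the archive or in the live table directory. The standalone sketch below shows that resolution step in isolation, using only calls that appear in these examples (the constructor-style new HFileLink(conf, path) API of these HBase versions; newer releases construct links differently). The family name "f" and the link components are hypothetical, and on an empty filesystem the sketch simply reports the link as missing.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HFileLink;

public class HFileLinkResolveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical link path <family>/<table>=<region>-<hfile>, built the same
    // way Example 1 builds it from the snapshot entries.
    Path linkPath = new Path("f",
        HFileLink.createHFileLinkName("testtb",
            "abc123def4567890abc123def4567890",
            "123456789abcdef0123456789abcdef0"));

    HFileLink link = new HFileLink(conf, linkPath);

    // A link can resolve to the archived copy or to the live store file;
    // like Example 1, check the archive first, then fall back to the origin.
    if (fs.exists(link.getArchivePath())) {
      System.out.println("archived, size=" + fs.getFileStatus(link.getArchivePath()).getLen());
    } else if (link.exists(fs)) {
      System.out.println("live, size=" + link.getFileStatus(fs).getLen());
    } else {
      System.out.println("not found under " + link.getOriginPath()
          + " or " + link.getArchivePath());
    }
  }
}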
Example 2: testHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
public void testHFileLink() throws IOException {
final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
writeStoreFile(writer);
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// Try to open store file from link
StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
BloomType.NONE);
assertTrue(storeFileInfo.isLink());
// Now confirm that I can read from the link
int count = 1;
HFileScanner s = hsf.createReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 3: verifyStoreFile
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
private void verifyStoreFile(final Path snapshotDir, final HRegionInfo regionInfo,
final String family, final String fileName) throws IOException {
Path refPath = null;
if (StoreFile.isReference(fileName)) {
// If is a reference file check if the parent file is present in the snapshot
Path snapshotHFilePath = new Path(new Path(
new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName);
refPath = StoreFile.getReferredToFile(snapshotHFilePath);
if (!fs.exists(refPath)) {
throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot);
}
}
Path linkPath;
if (refPath != null && HFileLink.isHFileLink(refPath)) {
linkPath = new Path(family, refPath.getName());
} else if (HFileLink.isHFileLink(fileName)) {
linkPath = new Path(family, fileName);
} else {
linkPath = new Path(family, HFileLink.createHFileLinkName(tableName,
regionInfo.getEncodedName(), fileName));
}
// check if the linked file exists (in the archive, or in the table dir)
HFileLink link = new HFileLink(services.getConfiguration(), linkPath);
if (!link.exists(fs)) {
throw new CorruptedSnapshotException("Can't find hfile: " + fileName
+ " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath()
+ ") directory for the primary table.", snapshot);
}
}
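verifyStoreFile distinguishes three kinds of names coming out of a snapshot: a reference file (whose referred-to parent may itself be a link), an hfile link, and a plain hfile that still has to be wrapped with createHFileLinkName before it can be resolved. The sketch below shows which branch some hypothetical names would take; the names are illustrative placeholders, and the expected results assume the link-name pattern used by these HBase versions.

import org.apache.hadoop.hbase.io.HFileLink;

public class VerifyStoreFileBranches {
  public static void main(String[] args) {
    // Hypothetical names as they might appear in a snapshot region directory.
    String plainHFile = "123456789abcdef0123456789abcdef0";
    String hfileLink = "testtb=abc123def4567890abc123def4567890-" + plainHFile;
    String refToLink = hfileLink + ".fedcba9876543210fedcba9876543210";

    // Plain hfile: not a link, so verifyStoreFile wraps it with
    // createHFileLinkName(table, region, name) before building the HFileLink.
    System.out.println(HFileLink.isHFileLink(plainHFile)); // expected: false

    // HFile link produced by a snapshot clone: used as-is.
    System.out.println(HFileLink.isHFileLink(hfileLink)); // expected: true

    // Reference file (".<parentRegion>" suffix): Example 3 first resolves the
    // referred-to parent via StoreFile.getReferredToFile and, if that parent is
    // itself a link, verifies the link rather than the reference name.
    System.out.println(refToLink);
  }
}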
Example 4: testHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
public void testHFileLink() throws IOException {
final String columnFamily = "f";
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
Path storedir = new Path(new Path(this.testDir,
new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf,
this.fs, 8 * 1024)
.withOutputDir(storedir)
.build();
Path storeFilePath = writer.getPath();
writeStoreFile(writer);
writer.close();
Path dstPath = new Path(this.testDir, new Path("test-region", columnFamily));
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// Try to open store file from link
StoreFile hsf = new StoreFile(this.fs, linkFilePath, testConf, cacheConf,
StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
assertTrue(hsf.isLink());
// Now confirm that I can read from the link
int count = 1;
HFileScanner s = hsf.createReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 5: verifyStoreFile
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
private void verifyStoreFile(final Path snapshotDir, final HRegionInfo regionInfo,
final String family, final String fileName) throws IOException {
Path refPath = null;
if (StoreFileInfo.isReference(fileName)) {
// If is a reference file check if the parent file is present in the snapshot
Path snapshotHFilePath = new Path(new Path(
new Path(snapshotDir, regionInfo.getEncodedName()), family), fileName);
refPath = StoreFileInfo.getReferredToFile(snapshotHFilePath);
if (!fs.exists(refPath)) {
throw new CorruptedSnapshotException("Missing parent hfile for: " + fileName, snapshot);
}
}
Path linkPath;
if (refPath != null && HFileLink.isHFileLink(refPath)) {
linkPath = new Path(family, refPath.getName());
} else if (HFileLink.isHFileLink(fileName)) {
linkPath = new Path(family, fileName);
} else {
linkPath = new Path(family, HFileLink.createHFileLinkName(tableName,
regionInfo.getEncodedName(), fileName));
}
// check if the linked file exists (in the archive, or in the table dir)
HFileLink link = new HFileLink(services.getConfiguration(), linkPath);
if (!link.exists(fs)) {
throw new CorruptedSnapshotException("Can't find hfile: " + fileName
+ " in the real (" + link.getOriginPath() + ") or archive (" + link.getArchivePath()
+ ") directory for the primary table.", snapshot);
}
}
Example 6: testHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
@Test
public void testHFileLink() throws IOException {
final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, testDir);
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hri);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
writeStoreFile(writer);
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// Try to open store file from link
StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
HStoreFile hsf =
new HStoreFile(this.fs, storeFileInfo, testConf, cacheConf, BloomType.NONE, true);
assertTrue(storeFileInfo.isLink());
hsf.initReader();
// Now confirm that I can read from the link
int count = 1;
HFileScanner s = hsf.getReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 7: testHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
public void testHFileLink() throws IOException {
final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
.withFilePath(regionFs.createTempName())
.build();
writeStoreFile(writer);
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// Try to open store file from link
StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
BloomType.NONE);
assertTrue(storeFileInfo.isLink());
// Now confirm that I can read from the link
int count = 1;
HFileScanner s = hsf.createReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 8: restoreReferenceFile
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* Create a new {@link Reference} as copy of the source one.
* <p><blockquote><pre>
* The source table looks like:
* 1234/abc (original file)
* 5678/abc.1234 (reference file)
*
* After the clone operation looks like:
* wxyz/table=1234-abc
* stuv/table=1234-abc.wxyz
*
* NOTE that the region name in the clone changes (md5 of regioninfo)
* and the reference should reflect that change.
* </pre></blockquote>
* @param familyDir destination directory for the store file
* @param regionInfo destination region info for the table
* @param hfileName reference file name
*/
private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
final String hfileName) throws IOException {
// Extract the referred information (hfile name and parent region)
String tableName = snapshotDesc.getTable();
Path refPath = StoreFile.getReferredToFile(new Path(new Path(new Path(tableName,
regionInfo.getEncodedName()), familyDir.getName()), hfileName));
String snapshotRegionName = refPath.getParent().getParent().getName();
String fileName = refPath.getName();
// The new reference should have the cloned region name as parent, if it is a clone.
String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
if (clonedRegionName == null) clonedRegionName = snapshotRegionName;
// The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
String refLink = fileName;
if (!HFileLink.isHFileLink(fileName)) {
refLink = HFileLink.createHFileLinkName(tableName, snapshotRegionName, fileName);
}
Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);
// Create the new reference
Path linkPath = new Path(familyDir,
HFileLink.createHFileLinkName(tableName, regionInfo.getEncodedName(), hfileName));
InputStream in = new HFileLink(conf, linkPath).open(fs);
OutputStream out = fs.create(outPath);
IOUtils.copyBytes(in, out, conf);
}
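To make the naming convention from the javadoc concrete, the sketch below reproduces it with the javadoc's own values. The region names 1234 and wxyz are illustrative (real encoded region names are md5 hashes), and it assumes the String-based createHFileLinkName overload used in this example.

import org.apache.hadoop.hbase.io.HFileLink;

public class RestoreReferenceNaming {
  public static void main(String[] args) {
    // Illustrative values lifted from the javadoc above.
    String tableName = "table";
    String snapshotRegionName = "1234"; // parent region of the reference 5678/abc.1234
    String fileName = "abc";            // the referred-to hfile
    String clonedRegionName = "wxyz";   // clone of region 1234

    // "abc" is not already a link, so wrap it: "table=1234-abc".
    String refLink = HFileLink.isHFileLink(fileName)
        ? fileName
        : HFileLink.createHFileLinkName(tableName, snapshotRegionName, fileName);

    // Restored reference name: "table=1234-abc.wxyz", matching the javadoc's
    // stuv/table=1234-abc.wxyz once written under the cloned region's family dir.
    System.out.println(refLink + '.' + clonedRegionName);
  }
}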
Example 9: testHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
public void testHFileLink() throws IOException {
final String columnFamily = "f";
HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
Path storedir = new Path(new Path(FSUtils.getRootDir(conf),
new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
this.fs, 8 * 1024)
.withOutputDir(storedir)
.build();
Path storeFilePath = writer.getPath();
writeStoreFile(writer);
writer.close();
Path dstPath = new Path(FSUtils.getRootDir(conf), new Path("test-region", columnFamily));
HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// Try to open store file from link
StoreFile hsf = new StoreFile(this.fs, linkFilePath, conf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
assertTrue(hsf.isLink());
// Now confirm that I can read from the link
int count = 1;
HFileScanner s = hsf.createReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 10: testReferenceToHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* This test creates an hfile and then the dir structures and files to verify that references
* to hfilelinks (created by snapshot clones) can be properly interpreted.
*/
public void testReferenceToHFileLink() throws IOException {
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
// adding legal table name chars to verify regex handles it.
HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
writeStoreFile(writer);
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
// create link to store file. <root>/clone/region/<cf>/<table>=<region>-<hfile>
HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()),
hriClone);
Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// create splits of the link.
// <root>/clone/splitA/<cf>/<reftohfilelink>,
// <root>/clone/splitB/<cf>/<reftohfilelink>
HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
f.createReader();
Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
f.closeReader(true);
// OK test the thing
FSUtils.logFileSystemState(fs, this.testDir, LOG);
// There is a case where a file with the hfilelink pattern is actually a daughter
// reference to a hfile link. There is code in StoreFile that handles this case.
// Try to open store file from link
StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
BloomType.NONE);
// Now confirm that I can read from the ref to link
int count = 1;
HFileScanner s = hsfA.createReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertTrue(count > 0); // read some rows here
// Try to open store file from link
StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
BloomType.NONE);
// Now confirm that I can read from the ref to link
HFileScanner sB = hsfB.createReader().getScanner(false, false);
sB.seekTo();
//count++ as seekTo() will advance the scanner
count++;
while (sB.next()) {
count++;
}
// read the rest of the rows
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 11: restoreReferenceFile
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* Create a new {@link Reference} as copy of the source one.
* <p><blockquote><pre>
* The source table looks like:
* 1234/abc (original file)
* 5678/abc.1234 (reference file)
*
* After the clone operation looks like:
* wxyz/table=1234-abc
* stuv/table=1234-abc.wxyz
*
* NOTE that the region name in the clone changes (md5 of regioninfo)
* and the reference should reflect that change.
* </pre></blockquote>
* @param familyDir destination directory for the store file
* @param regionInfo destination region info for the table
* @param hfileName reference file name
*/
private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
final String hfileName) throws IOException {
// Extract the referred information (hfile name and parent region)
String snapshotTable = snapshotDesc.getTable();
Path refPath = StoreFile.getReferredToFile(new Path(new Path(new Path(
snapshotTable, regionInfo.getEncodedName()), familyDir.getName()),
hfileName));
String snapshotRegionName = refPath.getParent().getParent().getName();
String fileName = refPath.getName();
// The new reference should have the cloned region name as parent, if it is a clone.
String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
if (clonedRegionName == null) clonedRegionName = snapshotRegionName;
// The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
Path linkPath = null;
String refLink = fileName;
if (!HFileLink.isHFileLink(fileName)) {
refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName);
linkPath = new Path(familyDir,
HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName));
}
Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);
// Create the new reference
InputStream in;
if (linkPath != null) {
in = new HFileLink(conf, linkPath).open(fs);
} else {
linkPath = new Path(new Path(HRegion.getRegionDir(snapshotDir, regionInfo.getEncodedName()),
familyDir.getName()), hfileName);
in = fs.open(linkPath);
}
OutputStream out = fs.create(outPath);
IOUtils.copyBytes(in, out, conf);
// Add the daughter region to the map
String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes()));
LOG.debug("Restore reference " + regionName + " to " + clonedRegionName);
synchronized (parentsMap) {
Pair<String, String> daughters = parentsMap.get(clonedRegionName);
if (daughters == null) {
daughters = new Pair<String, String>(regionName, null);
parentsMap.put(clonedRegionName, daughters);
} else if (!regionName.equals(daughters.getFirst())) {
daughters.setSecond(regionName);
}
}
}
Example 12: testReferenceToHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* This test creates an hfile and then the dir structures and files to verify that references
* to hfilelinks (created by snapshot clones) can be properly interpreted.
*/
public void testReferenceToHFileLink() throws IOException {
final String columnFamily = "f";
Path rootDir = FSUtils.getRootDir(conf);
String tablename = "_original-evil-name"; // adding legal table name chars to verify regex handles it.
HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename));
// store dir = <root>/<tablename>/<rgn>/<cf>
Path storedir = new Path(new Path(rootDir,
new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
// Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
this.fs, 8 * 1024)
.withOutputDir(storedir)
.build();
Path storeFilePath = writer.getPath();
writeStoreFile(writer);
writer.close();
// create link to store file. <root>/clone/region/<cf>/<table>=<region>-<hfile>
String target = "clone";
Path dstPath = new Path(rootDir, new Path(new Path(target, "7e0102"), columnFamily));
HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// create splits of the link.
// <root>/clone/splitA/<cf>/<reftohfilelink>,
// <root>/clone/splitB/<cf>/<reftohfilelink>
Path splitDirA = new Path(new Path(rootDir,
new Path(target, "571A")), columnFamily);
Path splitDirB = new Path(new Path(rootDir,
new Path(target, "571B")), columnFamily);
StoreFile f = new StoreFile(fs, linkFilePath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE);
byte[] splitRow = SPLITKEY;
Path pathA = StoreFile.split(fs, splitDirA, f, splitRow, Range.top); // top
Path pathB = StoreFile.split(fs, splitDirB, f, splitRow, Range.bottom); // bottom
// OK test the thing
FSUtils.logFileSystemState(fs, rootDir, LOG);
// There is a case where a file with the hfilelink pattern is actually a daughter
// reference to a hfile link. There is code in StoreFile that handles this case.
// Try to open store file from link
StoreFile hsfA = new StoreFile(this.fs, pathA, conf, cacheConf,
StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
// Now confirm that I can read from the ref to link
int count = 1;
HFileScanner s = hsfA.createReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertTrue(count > 0); // read some rows here
// Try to open store file from link
StoreFile hsfB = new StoreFile(this.fs, pathB, conf, cacheConf,
StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
// Now confirm that I can read from the ref to link
HFileScanner sB = hsfB.createReader().getScanner(false, false);
sB.seekTo();
//count++ as seekTo() will advance the scanner
count++;
while (sB.next()) {
count++;
}
// read the rest of the rows
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 13: restoreReferenceFile
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* Create a new {@link Reference} as copy of the source one.
* <p><blockquote><pre>
* The source table looks like:
* 1234/abc (original file)
* 5678/abc.1234 (reference file)
*
* After the clone operation looks like:
* wxyz/table=1234-abc
* stuv/table=1234-abc.wxyz
*
* NOTE that the region name in the clone changes (md5 of regioninfo)
* and the reference should reflect that change.
* </pre></blockquote>
* @param familyDir destination directory for the store file
* @param regionInfo destination region info for the table
* @param hfileName reference file name
*/
private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
String hfileName = storeFile.getName();
// Extract the referred information (hfile name and parent region)
Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(
snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()),
hfileName));
String snapshotRegionName = refPath.getParent().getParent().getName();
String fileName = refPath.getName();
// The new reference should have the cloned region name as parent, if it is a clone.
String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
if (clonedRegionName == null) clonedRegionName = snapshotRegionName;
// The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
Path linkPath = null;
String refLink = fileName;
if (!HFileLink.isHFileLink(fileName)) {
refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName);
linkPath = new Path(familyDir,
HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName));
}
Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);
// Create the new reference
if (storeFile.hasReference()) {
Reference reference = Reference.convert(storeFile.getReference());
reference.write(fs, outPath);
} else {
InputStream in;
if (linkPath != null) {
in = new HFileLink(conf, linkPath).open(fs);
} else {
linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(),
regionInfo.getEncodedName()), familyDir.getName()), hfileName);
in = fs.open(linkPath);
}
OutputStream out = fs.create(outPath);
IOUtils.copyBytes(in, out, conf);
}
// Add the daughter region to the map
String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes()));
LOG.debug("Restore reference " + regionName + " to " + clonedRegionName);
synchronized (parentsMap) {
Pair<String, String> daughters = parentsMap.get(clonedRegionName);
if (daughters == null) {
daughters = new Pair<String, String>(regionName, null);
parentsMap.put(clonedRegionName, daughters);
} else if (!regionName.equals(daughters.getFirst())) {
daughters.setSecond(regionName);
}
}
}
Example 14: testReferenceToHFileLink
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* This test creates an hfile and then the dir structures and files to verify that references
* to hfilelinks (created by snapshot clones) can be properly interpreted.
*/
public void testReferenceToHFileLink() throws IOException {
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
// adding legal table name chars to verify regex handles it.
HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
writeStoreFile(writer);
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
// create link to store file. <root>/clone/region/<cf>/<table>=<region>-<hfile>
HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()),
hriClone);
Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// create splits of the link.
// <root>/clone/splitA/<cf>/<reftohfilelink>,
// <root>/clone/splitB/<cf>/<reftohfilelink>
HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
// OK test the thing
FSUtils.logFileSystemState(fs, this.testDir, LOG);
// There is a case where a file with the hfilelink pattern is actually a daughter
// reference to a hfile link. There is code in StoreFile that handles this case.
// Try to open store file from link
StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
BloomType.NONE);
// Now confirm that I can read from the ref to link
int count = 1;
HFileScanner s = hsfA.createReader().getScanner(false, false);
s.seekTo();
while (s.next()) {
count++;
}
assertTrue(count > 0); // read some rows here
// Try to open store file from link
StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
BloomType.NONE);
// Now confirm that I can read from the ref to link
HFileScanner sB = hsfB.createReader().getScanner(false, false);
sB.seekTo();
//count++ as seekTo() will advance the scanner
count++;
while (sB.next()) {
count++;
}
// read the rest of the rows
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
Example 15: restoreReferenceFile
import org.apache.hadoop.hbase.io.HFileLink; // import the package/class the method depends on
/**
* Create a new {@link Reference} as copy of the source one.
* <p><blockquote><pre>
* The source table looks like:
* 1234/abc (original file)
* 5678/abc.1234 (reference file)
*
* After the clone operation looks like:
* wxyz/table=1234-abc
* stuv/table=1234-abc.wxyz
*
* NOTE that the region name in the clone changes (md5 of regioninfo)
* and the reference should reflect that change.
* </pre></blockquote>
* @param familyDir destination directory for the store file
* @param regionInfo destination region info for the table
* @param hfileName reference file name
*/
private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
final String hfileName) throws IOException {
// Extract the referred information (hfile name and parent region)
Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(
snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()),
hfileName));
String snapshotRegionName = refPath.getParent().getParent().getName();
String fileName = refPath.getName();
// The new reference should have the cloned region name as parent, if it is a clone.
String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
if (clonedRegionName == null) clonedRegionName = snapshotRegionName;
// The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
Path linkPath = null;
String refLink = fileName;
if (!HFileLink.isHFileLink(fileName)) {
refLink = HFileLink.createHFileLinkName(snapshotTable, snapshotRegionName, fileName);
linkPath = new Path(familyDir,
HFileLink.createHFileLinkName(snapshotTable, regionInfo.getEncodedName(), hfileName));
}
Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);
// Create the new reference
InputStream in;
if (linkPath != null) {
in = new HFileLink(conf, linkPath).open(fs);
} else {
linkPath = new Path(new Path(HRegion.getRegionDir(snapshotDir, regionInfo.getEncodedName()),
familyDir.getName()), hfileName);
in = fs.open(linkPath);
}
OutputStream out = fs.create(outPath);
IOUtils.copyBytes(in, out, conf);
// Add the daughter region to the map
String regionName = Bytes.toString(regionsMap.get(regionInfo.getEncodedNameAsBytes()));
LOG.debug("Restore reference " + regionName + " to " + clonedRegionName);
synchronized (parentsMap) {
Pair<String, String> daughters = parentsMap.get(clonedRegionName);
if (daughters == null) {
daughters = new Pair<String, String>(regionName, null);
parentsMap.put(clonedRegionName, daughters);
} else if (!regionName.equals(daughters.getFirst())) {
daughters.setSecond(regionName);
}
}
}