This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.util.FSTableDescriptors.getTableDescriptorFromFs. If you are wondering what FSTableDescriptors.getTableDescriptorFromFs does, how to call it, or where to find working examples, the curated samples below should help. You can also explore the containing class, org.apache.hadoop.hbase.util.FSTableDescriptors, for more context.
The following 15 code examples demonstrate FSTableDescriptors.getTableDescriptorFromFs, ordered by popularity by default.
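Before the examples, here is a minimal, self-contained sketch of the typical call pattern. It is illustrative only: the table name "myTable" is hypothetical, and it assumes the HBase 1.x-era API, where getTableDescriptorFromFs(FileSystem, Path) reads the descriptor file stored under a table directory and returns an HTableDescriptor (in HBase 2.x the same call returns a TableDescriptor).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;

public class TableDescriptorFromFsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);      // resolved from hbase.rootdir
    FileSystem fs = rootDir.getFileSystem(conf);
    // Resolve the on-disk directory of a (hypothetical) table and read its
    // descriptor directly from the filesystem, bypassing the master.
    Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf("myTable"));
    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
    System.out.println("Column families: " + htd.getColumnFamilies().length);
  }
}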
Example 1: verifyHColumnDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void verifyHColumnDescriptor(int expected, final TableName tableName,
    final byte[]... families) throws IOException {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  // Verify descriptor from master
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] hcds = htd.getColumnFamilies();
  verifyHColumnDescriptor(expected, hcds, tableName, families);
  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  hcds = htd.getColumnFamilies();
  verifyHColumnDescriptor(expected, hcds, tableName, families);
}
Example 2: call
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
@Override
public Void call() throws Exception {
  LOG.debug("Running table info copy.");
  this.rethrowException();
  LOG.debug("Attempting to copy table info for snapshot:"
      + ClientSnapshotDescriptionUtils.toString(this.snapshot));
  // get the HTable descriptor
  HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir,
      TableName.valueOf(this.snapshot.getTable()));
  this.rethrowException();
  // write a copy of the descriptor to the snapshot directory
  Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
  new FSTableDescriptors(fs, rootDir)
      .createTableDescriptorForTableDirectory(snapshotDir, orig, false);
  LOG.debug("Finished copying tableinfo.");
  return null;
}
Example 3: verifyHColumnDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void verifyHColumnDescriptor(int expected, final TableName tableName,
    final byte[]... families) throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  // Verify descriptor from master
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] hcds = htd.getColumnFamilies();
  verifyHColumnDescriptor(expected, hcds, tableName, families);
  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  hcds = htd.getColumnFamilies();
  verifyHColumnDescriptor(expected, hcds, tableName, families);
}
Example 4: verifyHColumnDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void verifyHColumnDescriptor(int expected, final TableName tableName,
    final byte[]... families) throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  // Verify descriptor from master
  TableDescriptor htd = admin.getDescriptor(tableName);
  ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
  verifyHColumnDescriptor(expected, hcds, tableName, families);
  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  hcds = td.getColumnFamilies();
  verifyHColumnDescriptor(expected, hcds, tableName, families);
}
Example 5: compactTable
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
    throws IOException {
  HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
  for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
    compactRegion(tableDir, htd, regionDir, compactOnce, major);
  }
}
Example 6: verifyTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void verifyTableDescriptor(final TableName tableName,
    final byte[]... families) throws IOException {
  Admin admin = TEST_UTIL.getHBaseAdmin();
  // Verify descriptor from master
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  verifyTableDescriptor(htd, tableName, families);
  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  verifyTableDescriptor(htd, tableName, families);
}
Example 7: init
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void init() throws IOException {
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  // load table descriptor
  htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
  Set<String> snapshotRegionNames =
      SnapshotReferenceUtil.getSnapshotRegionNames(fs, snapshotDir);
  if (snapshotRegionNames == null) {
    throw new IllegalArgumentException("Snapshot seems empty");
  }
  regions = new ArrayList<HRegionInfo>(snapshotRegionNames.size());
  for (String regionName : snapshotRegionNames) {
    // load region descriptor
    Path regionDir = new Path(snapshotDir, regionName);
    HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
    if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
        hri.getStartKey(), hri.getEndKey())) {
      regions.add(hri);
    }
  }
  // sort regions by startKey
  Collections.sort(regions);
  initScanMetrics(scan);
  RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
      rootDir, restoreDir, snapshotName);
}
Example 8: loadSnapshotInfo
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
/**
 * Load the snapshot info and table descriptor for the specified snapshot.
 * @param snapshotName name of the snapshot to load
 * @return false if the snapshot is not found
 */
private boolean loadSnapshotInfo(final String snapshotName) throws IOException {
  snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  if (!fs.exists(snapshotDir)) {
    LOG.warn("Snapshot '" + snapshotName + "' not found in: " + snapshotDir);
    return false;
  }
  snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  snapshotTableDesc = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
  return true;
}
Example 9: copySnapshotForScanner
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
/**
 * Copy the snapshot files for a snapshot scanner; discards meta changes.
 * @param conf
 * @param fs
 * @param rootDir
 * @param restoreDir
 * @param snapshotName
 * @throws IOException
 */
public static void copySnapshotForScanner(Configuration conf, FileSystem fs, Path rootDir,
    Path restoreDir, String snapshotName) throws IOException {
  // ensure the restore dir is on the same filesystem and is not under the root dir
  if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) {
    throw new IllegalArgumentException("Filesystems for restore directory and HBase root directory " +
        "should be the same");
  }
  if (restoreDir.toUri().getPath().startsWith(rootDir.toUri().getPath())) {
    throw new IllegalArgumentException("Restore directory cannot be a sub directory of HBase " +
        "root directory. RootDir: " + rootDir + ", restoreDir: " + restoreDir);
  }
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
  // load table descriptor
  HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, snapshotDir);
  MonitoredTask status = TaskMonitor.get().createStatus(
      "Restoring snapshot '" + snapshotName + "' to directory " + restoreDir);
  ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher();
  RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs, snapshotDesc,
      snapshotDir, htd, restoreDir, monitor, status);
  helper.restoreHdfsRegions(); // TODO: parallelize.
  if (LOG.isDebugEnabled()) {
    LOG.debug("Restored table dir:" + restoreDir);
    FSUtils.logFileSystemState(fs, restoreDir, LOG);
  }
}
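As a rough illustration, this is how the helper above might be invoked. The snapshot name "mySnapshot" and the restore path are hypothetical; per the checks at the top of the method, the restore directory must live on the same filesystem as the HBase root directory but must not be nested under it.

Configuration conf = HBaseConfiguration.create();
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = rootDir.getFileSystem(conf);
// Hypothetical restore location: same filesystem as rootDir, but outside it.
Path restoreDir = new Path("/tmp/snapshot-restore-" + System.currentTimeMillis());
RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, "mySnapshot");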
Example 10: verifyTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void verifyTableDescriptor(final TableName tableName,
    final byte[]... families) throws IOException {
  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  // Verify descriptor from master
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  verifyTableDescriptor(htd, tableName, families);
  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  verifyTableDescriptor(htd, tableName, families);
}
Example 11: copyTableRegionInfo
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
/**
 * Copy the table RegionInfo out into the incremental backup image. TODO: consider moving
 * this logic into HBackupFileSystem.
 * @param conn connection
 * @param backupInfo backup info
 * @param conf configuration
 * @throws IOException exception
 */
public static void copyTableRegionInfo(Connection conn, BackupInfo backupInfo, Configuration conf)
    throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  // for each table in the table set, copy out the table info and region
  // info files in the correct directory structure
  for (TableName table : backupInfo.getTables()) {
    if (!MetaTableAccessor.tableExists(conn, table)) {
      LOG.warn("Table " + table + " does not exist, skipping it.");
      continue;
    }
    TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
    // write a copy of the descriptor to the target directory
    Path target = new Path(backupInfo.getTableBackupDir(table));
    FileSystem targetFs = target.getFileSystem(conf);
    FSTableDescriptors descriptors =
        new FSTableDescriptors(conf, targetFs, FSUtils.getRootDir(conf));
    descriptors.createTableDescriptorForTableDirectory(target, orig, false);
    LOG.debug("Attempting to copy table info for:" + table + " target: " + target
        + " descriptor: " + orig);
    LOG.debug("Finished copying tableinfo.");
    List<RegionInfo> regions = MetaTableAccessor.getTableRegions(conn, table);
    // For each region, write the region info to disk
    LOG.debug("Starting to write region info for table " + table);
    for (RegionInfo regionInfo : regions) {
      Path regionDir =
          HRegion.getRegionDir(new Path(backupInfo.getTableBackupDir(table)), regionInfo);
      regionDir = new Path(backupInfo.getTableBackupDir(table), regionDir.getName());
      writeRegioninfoOnFilesystem(conf, targetFs, regionDir, regionInfo);
    }
    LOG.debug("Finished writing region info for table " + table);
  }
}
Example 12: getTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
    String lastIncrBackupId) throws IOException {
  if (lastIncrBackupId != null) {
    String target =
        BackupUtils.getTableBackupDir(backupRootPath.toString(),
            lastIncrBackupId, tableName);
    return FSTableDescriptors.getTableDescriptorFromFs(fileSys, new Path(target));
  }
  return null;
}
Example 13: verifyTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void verifyTableDescriptor(final TableName tableName, final byte[]... families)
    throws Exception {
  // Verify descriptor from master
  TableDescriptor htd = admin.getDescriptor(tableName).get();
  verifyTableDescriptor(htd, tableName, families);
  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  verifyTableDescriptor(td, tableName, families);
}
Example 14: verifyTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void verifyTableDescriptor(final TableName tableName,
    final byte[]... families) throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  // Verify descriptor from master
  HTableDescriptor htd = admin.getTableDescriptor(tableName);
  verifyTableDescriptor(htd, tableName, families);
  // Verify descriptor from HDFS
  MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
  TableDescriptor td =
      FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
  verifyTableDescriptor(td, tableName, families);
}
Example 15: compactTable
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class this method depends on
private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
    throws IOException {
  TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
  for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) {
    compactRegion(tableDir, htd, regionDir, compactOnce, major);
  }
}