This article collects typical usage examples of the Java method org.apache.hadoop.hbase.util.FSTableDescriptors.createTableDescriptor. If you are wondering exactly what FSTableDescriptors.createTableDescriptor does, how to call it, or what it looks like in real code, the curated method examples here should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.util.FSTableDescriptors.
The following shows 15 code examples of FSTableDescriptors.createTableDescriptor, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Java code samples.
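Before the harvested examples, here is a minimal standalone sketch of the two call styles that recur below: the instance form obtained from an FSTableDescriptors(conf, fs, rootdir) object (HBase 0.96 and later), and the older static form FSTableDescriptors.createTableDescriptor(htd, conf) used by the pre-0.96 snippets. It is an illustration only, not taken from any of the projects quoted on this page; the table name "demo_table", the family "f", and the local root directory are assumptions made for the sketch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class CreateTableDescriptorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // Illustrative root directory; the tests below use UTIL.getDataTestDir()
    // or FSUtils.getRootDir(conf) instead.
    Path rootdir = new Path("/tmp/hbase-rootdir");

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
    htd.addFamily(new HColumnDescriptor("f"));

    // Instance form, as in Examples 2 and 10: write the tableinfo file
    // under rootdir, then force re-creation of an already existing one.
    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);
    fstd.createTableDescriptor(htd);
    boolean recreated = fstd.createTableDescriptor(htd, true);
    System.out.println("forced re-creation succeeded: " + recreated);

    // Older static form, only available in pre-0.96 code bases
    // (see Examples 3, 6, 13, 14 and 15):
    // FSTableDescriptors.createTableDescriptor(htd, conf);
  }
}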
Example 1: testGetSetOfHTD
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
@Test
public void testGetSetOfHTD() throws IOException {
  HBaseTestingUtility HTU = new HBaseTestingUtility();
  final String tablename = "testGetSetOfHTD";
  // Delete the temporary table directory that might still be there from the
  // previous test run.
  FSTableDescriptors.deleteTableDescriptorIfExists(tablename,
      HTU.getConfiguration());
  HTableDescriptor htd = new HTableDescriptor(tablename);
  FSTableDescriptors.createTableDescriptor(htd, HTU.getConfiguration());
  HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  HTableDescriptor htd2 = hri.getTableDesc();
  assertTrue(htd.equals(htd2));
  final String key = "SOME_KEY";
  assertNull(htd.getValue(key));
  final String value = "VALUE";
  htd.setValue(key, value);
  hri.setTableDesc(htd);
  HTableDescriptor htd3 = hri.getTableDesc();
  assertTrue(htd.equals(htd3));
}
Example 2: testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
@Test
public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
    throws Exception {
  final String name = "createNewTableNew2";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  fstd.createTableDescriptor(htd, false);
  assertTrue("Should create new table descriptor",
      fstd.createTableDescriptor(htd, true));
}
Example 3: createRootTableInfo
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
private void createRootTableInfo(Path rd) throws IOException {
  // Create ROOT tableInfo if required.
  if (!FSTableDescriptors.isTableInfoExists(fs, rd,
      Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
    FSTableDescriptors.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
  }
}
Example 4: setUpBeforeClass
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
/**
* Spin up a cluster with a bunch of regions on it.
*/
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(NB_SLAVES);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  htd.addFamily(new HColumnDescriptor(FAMILY));
  TEST_UTIL.createMultiRegionsInMeta(TEST_UTIL.getConfiguration(), htd,
      HBaseTestingUtility.KEYS);
  // Make a mark for the table in the filesystem.
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  FSTableDescriptors.createTableDescriptor(fs,
      FSUtils.getRootDir(TEST_UTIL.getConfiguration()), htd);
  // Assign out the regions we just created.
  HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  admin.disableTable(TABLENAME);
  admin.enableTable(TABLENAME);
  boolean ready = false;
  while (!ready) {
    ZKAssign.blockUntilNoRIT(zkw);
    // Assert that every regionserver has some regions on it, else invoke the balancer.
    ready = true;
    for (int i = 0; i < NB_SLAVES; i++) {
      HRegionServer hrs = cluster.getRegionServer(i);
      if (hrs.getOnlineRegions().isEmpty()) {
        ready = false;
        break;
      }
    }
    if (!ready) {
      admin.balancer();
      Thread.sleep(100);
    }
  }
}
Example 5: testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
@Test
public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
    throws Exception {
  final String name = "createNewTableNew2";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  HTableDescriptor htd = new HTableDescriptor(name);
  FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false);
  assertTrue("Should create new table descriptor",
      FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true));
}
Example 6: testRestore
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
/**
* Execute the restore operation
* @param snapshotDir The snapshot directory to use as "restore source"
* @param sourceTableName The name of the snapshotted table
* @param htdClone The HTableDescriptor of the table to restore/clone.
*/
public void testRestore(final Path snapshotDir, final String sourceTableName,
    final HTableDescriptor htdClone) throws IOException {
  LOG.debug("pre-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  FSTableDescriptors.createTableDescriptor(htdClone, conf);
  RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sourceTableName, htdClone);
  helper.restoreHdfsRegions();
  LOG.debug("post-restore table=" + htdClone.getNameAsString() + " snapshot=" + snapshotDir);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
}
Example 7: testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
@Test
public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
    throws Exception {
  final String name = "createNewTableNew2";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  fstd.createTableDescriptor(htd, false);
  assertTrue("Should create new table descriptor",
      fstd.createTableDescriptor(htd, true));
}
Example 8: testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
@Test
public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
    throws Exception {
  final String name = this.name.getMethodName();
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
  fstd.createTableDescriptor(htd, false);
  assertTrue("Should create new table descriptor",
      fstd.createTableDescriptor(htd, true));
}
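Example 8 is the HBase 2.x take on the same test, where HTableDescriptor gives way to the TableDescriptor / TableDescriptorBuilder API. As a small sketch in the same fragment style as the examples on this page (assuming an HBase 2.x classpath; the name "sketch_table" and family "f" are invented for the illustration, not part of the quoted test), a descriptor with one column family could be built and handed to createTableDescriptor like this:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

// Build a one-family descriptor and write it under rootdir, then overwrite it
// forcefully the way Example 8 asserts.
static boolean writeSketchDescriptor(Configuration conf, FileSystem fs, Path rootdir)
    throws IOException {
  TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("sketch_table"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
      .build();
  FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);
  fstd.createTableDescriptor(td, false);       // first write of the tableinfo file
  return fstd.createTableDescriptor(td, true); // forced re-creation returns true on success
}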
Example 9: checkRootDir
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
/**
 * Get the rootdir. Make sure it's wholesome and exists before returning.
 * @param rd
 * @param c
 * @param fs
 * @return hbase.rootdir (after checking that it exists and, if needed, bootstrapping it
 * by populating the directory with the necessary bootup files).
 * @throws IOException
 */
@SuppressWarnings("deprecation")
private Path checkRootDir(final Path rd, final Configuration c,
    final FileSystem fs)
    throws IOException {
  // If FS is in safe mode wait till out of it.
  FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  // Filesystem is good. Go ahead and check for hbase.rootdir.
  try {
    if (!fs.exists(rd)) {
      fs.mkdirs(rd);
      // DFS leaves safe mode with 0 DNs when there are 0 blocks.
      // We used to handle this by checking the current DN count and waiting until
      // it is nonzero. With security, the check for datanode count doesn't work --
      // it is a privileged op. So instead we adopt the strategy of the jobtracker
      // and simply retry file creation during bootstrap indefinitely. As soon as
      // there is one datanode it will succeed. Permission problems should have
      // already been caught by mkdirs above.
      FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    } else {
      if (!fs.isDirectory(rd)) {
        throw new IllegalArgumentException(rd.toString() + " is not a directory");
      }
      // as above
      FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    }
  } catch (DeserializationException de) {
    LOG.fatal("Please fix invalid configuration for " + HConstants.HBASE_DIR, de);
    IOException ioe = new IOException();
    ioe.initCause(de);
    throw ioe;
  } catch (IllegalArgumentException iae) {
    LOG.fatal("Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
    throw iae;
  }
  // Make sure cluster ID exists
  if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
      HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
    FSUtils.setClusterId(fs, rd, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  }
  clusterId = FSUtils.getClusterId(fs, rd);
  // Make sure the meta region directory exists!
  if (!FSUtils.metaRegionExists(fs, rd)) {
    bootstrap(rd, c);
  } else {
    // Migrate table descriptor files if necessary
    org.apache.hadoop.hbase.util.FSTableDescriptorMigrationToSubdir
        .migrateFSTableDescriptorsIfNecessary(fs, rd);
  }
  // Create tableinfo-s for hbase:meta if not already there.
  // meta table is a system table, so descriptors are predefined,
  // we should get them from registry.
  FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
  fsd.createTableDescriptor(
      new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
  return rd;
}
Example 10: testACLTableMigration
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
@Test (timeout = 300000)
public void testACLTableMigration() throws IOException {
  Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testACLTable");
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration conf = TEST_UTIL.getConfiguration();
  byte[] FAMILY = Bytes.toBytes("l");
  byte[] QUALIFIER = Bytes.toBytes("testUser");
  byte[] VALUE = Bytes.toBytes("RWCA");
  // Create a Region
  HTableDescriptor aclTable = new HTableDescriptor(TableName.valueOf("testACLTable"));
  aclTable.addFamily(new HColumnDescriptor(FAMILY));
  FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootDir);
  fstd.createTableDescriptor(aclTable);
  HRegionInfo hriAcl = new HRegionInfo(aclTable.getTableName(), null, null);
  HRegion region = HRegion.createHRegion(hriAcl, rootDir, conf, aclTable);
  try {
    // Create rows
    Put p = new Put(Bytes.toBytes("-ROOT-"));
    p.addImmutable(FAMILY, QUALIFIER, VALUE);
    region.put(p);
    p = new Put(Bytes.toBytes(".META."));
    p.addImmutable(FAMILY, QUALIFIER, VALUE);
    region.put(p);
    p = new Put(Bytes.toBytes("_acl_"));
    p.addImmutable(FAMILY, QUALIFIER, VALUE);
    region.put(p);
    NamespaceUpgrade upgrade = new NamespaceUpgrade();
    upgrade.updateAcls(region);
    // verify row -ROOT- is removed
    Get g = new Get(Bytes.toBytes("-ROOT-"));
    Result r = region.get(g);
    assertTrue(r == null || r.size() == 0);
    // verify row _acl_ is renamed to hbase:acl
    g = new Get(AccessControlLists.ACL_TABLE_NAME.toBytes());
    r = region.get(g);
    assertTrue(r != null && r.size() == 1);
    assertTrue(Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)) == 0);
    // verify row .META. is renamed to hbase:meta
    g = new Get(TableName.META_TABLE_NAME.toBytes());
    r = region.get(g);
    assertTrue(r != null && r.size() == 1);
    assertTrue(Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)) == 0);
  } finally {
    region.close();
    // Delete the region
    HRegionFileSystem.deleteRegionFromFileSystem(conf, fs,
        FSUtils.getTableDir(rootDir, hriAcl.getTable()), hriAcl);
  }
}
Example 11: testACLTableMigration
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
@Test (timeout = 300000)
public void testACLTableMigration() throws IOException {
  Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testACLTable");
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  Configuration conf = TEST_UTIL.getConfiguration();
  byte[] FAMILY = Bytes.toBytes("l");
  byte[] QUALIFIER = Bytes.toBytes("testUser");
  byte[] VALUE = Bytes.toBytes("RWCA");
  // Create a Region
  HTableDescriptor aclTable = new HTableDescriptor(TableName.valueOf("testACLTable"));
  aclTable.addFamily(new HColumnDescriptor(FAMILY));
  FSTableDescriptors fstd = new FSTableDescriptors(fs, rootDir);
  fstd.createTableDescriptor(aclTable);
  HRegionInfo hriAcl = new HRegionInfo(aclTable.getTableName(), null, null);
  HRegion region = HRegion.createHRegion(hriAcl, rootDir, conf, aclTable);
  try {
    // Create rows
    Put p = new Put(Bytes.toBytes("-ROOT-"));
    p.addImmutable(FAMILY, QUALIFIER, VALUE);
    region.put(p);
    p = new Put(Bytes.toBytes(".META."));
    p.addImmutable(FAMILY, QUALIFIER, VALUE);
    region.put(p);
    p = new Put(Bytes.toBytes("_acl_"));
    p.addImmutable(FAMILY, QUALIFIER, VALUE);
    region.put(p);
    NamespaceUpgrade upgrade = new NamespaceUpgrade();
    upgrade.updateAcls(region);
    // verify row -ROOT- is removed
    Get g = new Get(Bytes.toBytes("-ROOT-"));
    Result r = region.get(g);
    assertTrue(r == null || r.size() == 0);
    // verify row _acl_ is renamed to hbase:acl
    g = new Get(AccessControlLists.ACL_TABLE_NAME.toBytes());
    r = region.get(g);
    assertTrue(r != null && r.size() == 1);
    assertTrue(Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)) == 0);
    // verify row .META. is renamed to hbase:meta
    g = new Get(TableName.META_TABLE_NAME.toBytes());
    r = region.get(g);
    assertTrue(r != null && r.size() == 1);
    assertTrue(Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIER)) == 0);
  } finally {
    region.close();
    // Delete the region
    HRegionFileSystem.deleteRegionFromFileSystem(conf, fs,
        FSUtils.getTableDir(rootDir, hriAcl.getTable()), hriAcl);
  }
}
Example 12: checkRootDir
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
/**
 * Get the rootdir. Make sure it's wholesome and exists before returning.
 * @param rd
 * @param c
 * @param fs
 * @return hbase.rootdir (after checking that it exists and, if needed, bootstrapping it
 * by populating the directory with the necessary bootup files).
 * @throws IOException
 */
private Path checkRootDir(final Path rd, final Configuration c, final FileSystem fs)
    throws IOException {
  // If FS is in safe mode wait till out of it.
  FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  // Filesystem is good. Go ahead and check for hbase.rootdir.
  try {
    if (!fs.exists(rd)) {
      fs.mkdirs(rd);
      // DFS leaves safe mode with 0 DNs when there are 0 blocks.
      // We used to handle this by checking the current DN count and waiting until
      // it is nonzero. With security, the check for datanode count doesn't work --
      // it is a privileged op. So instead we adopt the strategy of the jobtracker
      // and simply retry file creation during bootstrap indefinitely. As soon as
      // there is one datanode it will succeed. Permission problems should have
      // already been caught by mkdirs above.
      FSUtils.setVersion(fs, rd, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    } else {
      if (!fs.isDirectory(rd)) {
        throw new IllegalArgumentException(rd.toString() + " is not a directory");
      }
      // as above
      FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
          10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
          HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS));
    }
  } catch (DeserializationException de) {
    LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
        + HConstants.HBASE_DIR, de);
    IOException ioe = new IOException();
    ioe.initCause(de);
    throw ioe;
  } catch (IllegalArgumentException iae) {
    LOG.error(HBaseMarkers.FATAL, "Please fix invalid configuration for "
        + HConstants.HBASE_DIR + " " + rd.toString(), iae);
    throw iae;
  }
  // Make sure cluster ID exists
  if (!FSUtils.checkClusterIdExists(fs, rd, c.getInt(
      HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000))) {
    FSUtils.setClusterId(fs, rd, new ClusterId(), c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));
  }
  clusterId = FSUtils.getClusterId(fs, rd);
  // Make sure the meta region directory exists!
  if (!FSUtils.metaRegionExists(fs, rd)) {
    bootstrap(rd, c);
  }
  // Create tableinfo-s for hbase:meta if not already there.
  // assume, created table descriptor is for enabling table
  // meta table is a system table, so descriptors are predefined,
  // we should get them from registry.
  FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
  fsd.createTableDescriptor(fsd.get(TableName.META_TABLE_NAME));
  return rd;
}
Example 13: handleCreateTable
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
private void handleCreateTable() throws IOException, KeeperException {
  // TODO: Currently we make the table descriptor and as side-effect the
  // tableDir is created. Should we change below method to be createTable
  // where we create table in tmp dir with its table descriptor file and then
  // do rename to move it into place?
  FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf);
  List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
  final int batchSize =
      this.conf.getInt("hbase.master.createtable.batchsize", 100);
  HLog hlog = null;
  for (int regionIdx = 0; regionIdx < this.newRegions.length; regionIdx++) {
    HRegionInfo newRegion = this.newRegions[regionIdx];
    // 1. Create HRegion
    HRegion region = HRegion.createHRegion(newRegion,
        this.fileSystemManager.getRootDir(), this.conf,
        this.hTableDescriptor, hlog);
    if (hlog == null) {
      hlog = region.getLog();
    }
    regionInfos.add(region.getRegionInfo());
    if (regionIdx % batchSize == 0) {
      // 2. Insert into META
      MetaEditor.addRegionsToMeta(this.catalogTracker, regionInfos);
      regionInfos.clear();
    }
    // 3. Close the new region to flush to disk. Close log file too.
    region.close();
  }
  hlog.closeAndDelete();
  if (regionInfos.size() > 0) {
    MetaEditor.addRegionsToMeta(this.catalogTracker, regionInfos);
  }
  // 4. Trigger immediate assignment of the regions in round-robin fashion
  List<ServerName> servers = serverManager.getOnlineServersList();
  try {
    this.assignmentManager.assignUserRegions(Arrays.asList(newRegions),
        servers);
  } catch (InterruptedException ie) {
    LOG.error("Caught " + ie + " during round-robin assignment");
    throw new IOException(ie);
  }
  // 5. Set table enabled flag up in zk.
  try {
    assignmentManager.getZKTable().
        setEnabledTable(this.hTableDescriptor.getNameAsString());
  } catch (KeeperException e) {
    throw new IOException("Unable to ensure that the table will be" +
        " enabled because of a ZooKeeper issue", e);
  }
}
Example 14: createTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
/**
* Create new HTableDescriptor in HDFS.
*
* @param htableDescriptor
*/
public void createTableDescriptor(HTableDescriptor htableDescriptor)
    throws IOException {
  FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
}
Example 15: createTableDescriptor
import org.apache.hadoop.hbase.util.FSTableDescriptors; // import the package/class the method depends on
/**
* Create new HTableDescriptor in HDFS.
*
* @param htableDescriptor
*/
public void createTableDescriptor(HTableDescriptor htableDescriptor)
    throws IOException {
  FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
}