This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.HBaseAdmin.enableTable. If you are wondering what exactly HBaseAdmin.enableTable does in Java, how to use it, or where to find examples of it, the curated method examples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.client.HBaseAdmin.
The following presents 15 code examples of the HBaseAdmin.enableTable method, sorted by popularity by default.
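Before the examples, here is a minimal sketch of the pattern most of them share: HBase requires a table to be disabled before its schema can be altered, and enableTable brings it back online afterwards. The table name and column family below are hypothetical placeholders chosen for illustration, not taken from any of the examples.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class EnableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // HBaseAdmin is the admin client used throughout the examples below
    // (deprecated in newer HBase releases in favor of Admin obtained from a Connection).
    HBaseAdmin admin = new HBaseAdmin(conf);
    TableName tableName = TableName.valueOf("demo_table"); // hypothetical table name
    try {
      // Schema changes require the table to be offline first.
      admin.disableTable(tableName);
      admin.addColumn(tableName, new HColumnDescriptor("new_cf")); // hypothetical column family
      // Bring the table back online so clients can read and write again.
      admin.enableTable(tableName);
    } finally {
      admin.close();
    }
  }
}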
Example 1: testDisableTableAndRestart
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@Test(timeout = 300000)
public void testDisableTableAndRestart() throws Exception {
  final TableName tableName = TableName.valueOf("testDisableTableAndRestart");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  admin.createTable(desc);
  admin.disableTable(tableName);
  TEST_UTIL.waitTableDisabled(tableName.getName());
  TEST_UTIL.getHBaseCluster().shutdown();
  TEST_UTIL.getHBaseCluster().waitUntilShutDown();
  TEST_UTIL.restartHBaseCluster(2);
  admin.enableTable(tableName);
  TEST_UTIL.waitTableEnabled(tableName);
}
Example 2: setUp
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@BeforeMethod
public void setUp() throws Exception {
  HBaseAdmin admin = testutil.getHBaseAdmin();
  if (!admin.tableExists(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME)) {
    HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
    HColumnDescriptor datafam = new HColumnDescriptor(DEFAULT_TIMESTAMP_STORAGE_CF_NAME);
    datafam.setMaxVersions(Integer.MAX_VALUE);
    desc.addFamily(datafam);
    admin.createTable(desc);
  }
  if (admin.isTableDisabled(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME)) {
    admin.enableTable(DEFAULT_TIMESTAMP_STORAGE_TABLE_NAME);
  }
  HTableDescriptor[] tables = admin.listTables();
  for (HTableDescriptor t : tables) {
    LOG.info(t.getNameAsString());
  }
}
Example 3: createMetaData
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
public static void createMetaData(ObjectValue object, HTable table) throws IOException {
  final HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
  // create non-existing column families
  for (Step<String> columnFamily : object.keySet()) {
    if (columnFamily.getStep().equals("_id"))
      continue;
    final String columnFamilyId = columnFamily.getStep();
    // check if column family exists
    boolean exists = false;
    for (HColumnDescriptor familyDescriptor : table.getTableDescriptor().getFamilies()) {
      if (Bytes.toString(familyDescriptor.getName()).equals(columnFamilyId)) {
        exists = true;
        break;
      }
    }
    // if not: add it
    if (!exists) {
      admin.disableTable(table.getTableName());
      admin.addColumn(table.getTableName(), new HColumnDescriptor(columnFamilyId));
      admin.enableTable(table.getTableName());
    }
  }
}
Example 4: connect
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@Override
public void connect() throws IOException
{
  super.connect();
  HTableDescriptor tdesc = table.getTableDescriptor();
  if (!tdesc.hasFamily(columnFamilyBytes)) {
    HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
    admin.disableTable(table.getTableName());
    try {
      HColumnDescriptor cdesc = new HColumnDescriptor(columnFamilyBytes);
      admin.addColumn(table.getTableName(), cdesc);
    } finally {
      admin.enableTable(table.getTableName());
      admin.close();
    }
  }
}
Example 5: updateTableAttribute
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
/**
 * @param rawAttributeName the attribute name as seen by applications; it allows multiple values,
 *     for example secondaryIndex in secondaryIndex$1 and coprocessor in coprocessor$2
 * @param indexOfAttribute the index appended to the raw attribute name, for example 2 in secondary$2
 */
static void updateTableAttribute(Configuration conf, byte[] tableName, String rawAttributeName,
    int indexOfAttribute, boolean ifUpdateorRemove, String value) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  HTableDescriptor desc = admin.getTableDescriptor(tableName);
  admin.disableTable(tableName);
  // System.out.println("TTDEBUG: disable table " + Bytes.toString(tableName));
  String coprocessorKey = rawAttributeName + indexOfAttribute;
  if (!ifUpdateorRemove) {
    desc.remove(Bytes.toBytes(coprocessorKey));
  } else {
    desc.setValue(coprocessorKey, value);
  }
  admin.modifyTable(tableName, desc);
  // System.out.println("TTDEBUG: modify table " + Bytes.toString(tableName));
  admin.enableTable(tableName);
  // System.out.println("TTDEBUG: enable table " + Bytes.toString(tableName));
  HTableDescriptor descNew = admin.getTableDescriptor(tableName);
  // modifyTable is asynchronous, so loop until the descriptor change is visible
  while (!desc.equals(descNew)) {
    System.err.println("TTDEBUG: waiting for descriptor to change: from " + descNew + " to " + desc);
    try { Thread.sleep(500); } catch (InterruptedException ex) {}
    descNew = admin.getTableDescriptor(tableName);
  }
}
Example 6: createIndexTable
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
public static void createIndexTable(String userTable, Configuration conf,
    Map<String, List<String>> indexColumnFamily) throws IOException, InterruptedException,
    ClassNotFoundException {
  HBaseAdmin hbaseAdmin = new IndexAdmin(conf);
  try {
    HTableDescriptor tableDescriptor = hbaseAdmin.getTableDescriptor(Bytes.toBytes(userTable));
    String input = conf.get(TABLE_INPUT_COLS);
    HTableDescriptor ihtd = parse(userTable, tableDescriptor, input, indexColumnFamily);
    // disable the table
    hbaseAdmin.disableTable(userTable);
    // This will create the index table. Also modifies the existing table htable descriptor.
    hbaseAdmin.modifyTable(Bytes.toBytes(userTable), ihtd);
    hbaseAdmin.enableTable(Bytes.toBytes(userTable));
  } finally {
    if (hbaseAdmin != null) {
      hbaseAdmin.close();
    }
  }
}
Example 7: loadTest
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@Test(timeout=TIMEOUT_MS)
public void loadTest() throws Exception {
  HBaseAdmin admin = new HBaseAdmin(conf);
  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();
  HColumnDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " + hcd + "\n");
  HTable t = new HTable(this.conf, TABLE);
  assertAllOnLine(t);
  admin.disableTable(TABLE);
  admin.modifyColumn(TABLE, hcd);
  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);
  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");
  // The table may not have all regions on line yet. Assert online before
  // moving to major compact.
  assertAllOnLine(t);
  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE.getName());
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }
  System.err.println("\nDone with the test, shutting down the cluster\n");
}
Example 8: setUp
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@BeforeMethod
public void setUp() throws Exception {
  HBaseAdmin admin = testutil.getHBaseAdmin();
  if (!admin.tableExists(TEST_TABLE)) {
    HTableDescriptor desc = new HTableDescriptor(TABLE_NAME);
    HColumnDescriptor datafam = new HColumnDescriptor(commitTableFamily);
    datafam.setMaxVersions(Integer.MAX_VALUE);
    desc.addFamily(datafam);
    HColumnDescriptor lowWatermarkFam = new HColumnDescriptor(lowWatermarkFamily);
    lowWatermarkFam.setMaxVersions(Integer.MAX_VALUE);
    desc.addFamily(lowWatermarkFam);
    desc.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
    admin.createTable(desc);
  }
  if (admin.isTableDisabled(TEST_TABLE)) {
    admin.enableTable(TEST_TABLE);
  }
  HTableDescriptor[] tables = admin.listTables();
  for (HTableDescriptor t : tables) {
    LOG.info(t.getNameAsString());
  }
}
Example 9: setUpBeforeClass
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
/**
 * Spin up a cluster with a bunch of regions on it.
 */
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(NB_SLAVES);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);
  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  htd.addFamily(new HColumnDescriptor(FAMILY));
  TEST_UTIL.createMultiRegionsInMeta(TEST_UTIL.getConfiguration(), htd,
      HBaseTestingUtility.KEYS);
  // Make a mark for the table in the filesystem.
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  FSTableDescriptors.createTableDescriptor(fs, FSUtils.getRootDir(TEST_UTIL.getConfiguration()), htd);
  // Assign out the regions we just created.
  HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
  MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster();
  admin.disableTable(TABLENAME);
  admin.enableTable(TABLENAME);
  boolean ready = false;
  while (!ready) {
    ZKAssign.blockUntilNoRIT(zkw);
    // Assert that every regionserver has some regions on it, else invoke the balancer.
    ready = true;
    for (int i = 0; i < NB_SLAVES; i++) {
      HRegionServer hrs = cluster.getRegionServer(i);
      if (hrs.getOnlineRegions().isEmpty()) {
        ready = false;
        break;
      }
    }
    if (!ready) {
      admin.balancer();
      Thread.sleep(100);
    }
  }
}
Example 10: applyColumnFamilyOptions
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
/**
 * Apply column family options such as Bloom filters, compression, and data
 * block encoding.
 */
protected void applyColumnFamilyOptions(byte[] tableName,
    byte[][] columnFamilies) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
  LOG.info("Disabling table " + Bytes.toString(tableName));
  admin.disableTable(tableName);
  for (byte[] cf : columnFamilies) {
    HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
    boolean isNewCf = columnDesc == null;
    if (isNewCf) {
      columnDesc = new HColumnDescriptor(cf);
    }
    if (bloomType != null) {
      columnDesc.setBloomFilterType(bloomType);
    }
    if (compressAlgo != null) {
      columnDesc.setCompressionType(compressAlgo);
    }
    if (dataBlockEncodingAlgo != null) {
      columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
      columnDesc.setEncodeOnDisk(!encodeInCacheOnly);
    }
    if (inMemoryCF) {
      columnDesc.setInMemory(inMemoryCF);
    }
    if (isNewCf) {
      admin.addColumn(tableName, columnDesc);
    } else {
      admin.modifyColumn(tableName, columnDesc);
    }
  }
  LOG.info("Enabling table " + Bytes.toString(tableName));
  admin.enableTable(tableName);
}
Example 11: loadTest
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@Test(timeout=TIMEOUT_MS)
public void loadTest() throws Exception {
  HBaseAdmin admin = new HBaseAdmin(conf);
  compression = Compression.Algorithm.GZ; // used for table setup
  super.loadTest();
  HColumnDescriptor hcd = getColumnDesc(admin);
  System.err.println("\nDisabling encode-on-disk. Old column descriptor: " +
      hcd + "\n");
  admin.disableTable(TABLE);
  hcd.setEncodeOnDisk(false);
  admin.modifyColumn(TABLE, hcd);
  System.err.println("\nRe-enabling table\n");
  admin.enableTable(TABLE);
  System.err.println("\nNew column descriptor: " +
      getColumnDesc(admin) + "\n");
  System.err.println("\nCompacting the table\n");
  admin.majorCompact(TABLE);
  // Wait until compaction completes
  Threads.sleepWithoutInterrupt(5000);
  HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
  while (rs.compactSplitThread.getCompactionQueueSize() > 0) {
    Threads.sleep(50);
  }
  System.err.println("\nDone with the test, shutting down the cluster\n");
}
Example 12: testEnableTableWithNoRegionServers
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@Test(timeout = 300000)
public void testEnableTableWithNoRegionServers() throws Exception {
  final TableName tableName = TableName.valueOf("testEnableTableWithNoRegionServers");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HMaster m = cluster.getMaster();
  final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  admin.createTable(desc);
  admin.disableTable(tableName);
  TEST_UTIL.waitTableDisabled(tableName.getName());
  admin.enableTable(tableName);
  TEST_UTIL.waitTableEnabled(tableName);
  // disable once more
  admin.disableTable(tableName);
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  // now stop region servers
  JVMClusterUtil.RegionServerThread rs = cluster.getRegionServerThreads().get(0);
  rs.getRegionServer().stop("stop");
  cluster.waitForRegionServerToStop(rs.getRegionServer().getServerName(), 10000);
  TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
  admin.enableTable(tableName);
  assertTrue(admin.isTableEnabled(tableName));
  JVMClusterUtil.RegionServerThread rs2 = cluster.startRegionServer();
  m.getAssignmentManager().assign(admin.getTableRegions(tableName));
  TEST_UTIL.waitUntilAllRegionsAssigned(tableName);
  List<HRegionInfo> onlineRegions = admin.getOnlineRegions(
      rs2.getRegionServer().getServerName());
  assertEquals(2, onlineRegions.size());
  assertEquals(tableName, onlineRegions.get(1).getTable());
}
Example 13: updateCoprocessor
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
private static void updateCoprocessor(Configuration conf, byte[] dataTableName) throws IOException {
  HBaseAdmin admin = new HBaseAdmin(conf);
  HTableDescriptor desc = admin.getTableDescriptor(dataTableName);
  admin.disableTable(dataTableName);
  System.out.println("TTDEBUG: disable data table");
  if (INDEX_CP_CLASS.contains("null")) {
    desc.remove(Bytes.toBytes(INDEX_CP_NAME));
  } else {
    desc.setValue(INDEX_CP_NAME, INDEX_CP_PATH + "|" + INDEX_CP_CLASS + "|1001|arg1=1,arg2=2");
  }
  HColumnDescriptor descIndexCF = desc.getFamily(Bytes.toBytes("cf")); // TOREMOVE don't use cf,
  // KEEP_DELETED_CELLS => 'true'
  descIndexCF.setKeepDeletedCells(true);
  descIndexCF.setTimeToLive(HConstants.FOREVER);
  descIndexCF.setMaxVersions(Integer.MAX_VALUE);
  admin.modifyTable(dataTableName, desc);
  System.out.println("TTDEBUG: modify data table");
  admin.enableTable(dataTableName);
  System.out.println("TTDEBUG: enable data table");
  HTableDescriptor descNew = admin.getTableDescriptor(dataTableName);
  // modify table is asynchronous, has to loop over to check
  while (!desc.equals(descNew)) {
    System.out.println("TTDEBUG: waiting for descriptor to change: from " + descNew + " to " + desc);
    try { Thread.sleep(500); } catch (InterruptedException ex) {}
    descNew = admin.getTableDescriptor(dataTableName);
  }
}
Example 14: setUp
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@Before
@Override
public void setUp() throws Exception {
  // Initialize the cluster. This invokes LoadTestTool -init_only, which
  // will create the test table, appropriately pre-split
  super.setUp();
  // Update the test table schema so HFiles from this point will be written with
  // encryption features enabled.
  final HBaseAdmin admin = util.getHBaseAdmin();
  HTableDescriptor tableDescriptor =
      new HTableDescriptor(admin.getTableDescriptor(Bytes.toBytes(getTablename())));
  for (HColumnDescriptor columnDescriptor : tableDescriptor.getColumnFamilies()) {
    columnDescriptor.setEncryptionType("AES");
    LOG.info("Updating CF schema for " + getTablename() + "." +
        columnDescriptor.getNameAsString());
    admin.disableTable(getTablename());
    admin.modifyColumn(getTablename(), columnDescriptor);
    admin.enableTable(getTablename());
    util.waitFor(30000, 1000, true, new Predicate<IOException>() {
      @Override
      public boolean evaluate() throws IOException {
        return admin.isTableAvailable(getTablename());
      }
    });
  }
}
Example 15: modifyTable
import org.apache.hadoop.hbase.client.HBaseAdmin; // import the package/class that the method depends on
@Test
public void modifyTable() throws Exception {
  String TABLE_NAME = "TEST_BENCHMARK";
  //
  Configuration configuration = createConfiguration();
  HBaseAdmin hbaseAdmin = createHBaseAdmin(configuration);
  HTableDescriptor htd = hbaseAdmin.getTableDescriptor(Bytes
      .toBytes(TABLE_NAME));
  //
  HTableDescriptor newHtd = new HTableDescriptor(htd);
  newHtd.setValue(HTableDescriptor.SPLIT_POLICY,
      ConstantSizeRegionSplitPolicy.class.getName());
  //
  boolean disabled = false;
  if (hbaseAdmin.isTableEnabled(TABLE_NAME)) {
    hbaseAdmin.disableTable(TABLE_NAME);
    disabled = true;
  }
  //
  hbaseAdmin.modifyTable(Bytes.toBytes(TABLE_NAME), newHtd);
  //
  if (disabled) {
    hbaseAdmin.enableTable(TABLE_NAME);
  }
  //
  System.out.println(newHtd);
}