This article collects typical usage examples of the Java method org.apache.hadoop.hbase.TableName.getNameAsString. If you are wondering what TableName.getNameAsString does and how to use it, the curated code samples below may help. You can also explore the enclosing class, org.apache.hadoop.hbase.TableName, for more context.
The following presents 15 code examples of TableName.getNameAsString, ordered by popularity.
Example 1: tableDeleted
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Override
public void tableDeleted(TableName tableName) throws IOException {
  // The table write lock from DeleteHandler is already released; just delete the parent znode.
  String tableNameStr = tableName.getNameAsString();
  String tableLockZNode = ZKUtil.joinZNode(zkWatcher.tableLockZNode, tableNameStr);
  try {
    ZKUtil.deleteNode(zkWatcher, tableLockZNode);
  } catch (KeeperException ex) {
    if (ex.code() == KeeperException.Code.NOTEMPTY) {
      // We might get this on rare occasions where a CREATE table or some other table
      // operation is waiting to acquire the lock. In this case, the parent znode won't be deleted.
      LOG.warn("Could not delete the znode for table locks because NOTEMPTY: "
          + tableLockZNode);
      return;
    }
    throw new IOException(ex);
  }
}
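A note on the path construction above: ZKUtil.joinZNode simply concatenates a parent path and a child node name with a "/" separator. A minimal sketch, assuming the default zookeeper.znode.parent of "/hbase" and the default table-lock parent znode (both are configurable, so actual paths may differ in your deployment; "mytable" is a hypothetical table name):
// Builds the per-table lock znode path.
String tableLockZNode = ZKUtil.joinZNode("/hbase/table-lock", "mytable");
System.out.println(tableLockZNode); // prints "/hbase/table-lock/mytable"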
Example 2: checkAndUpdateNamespaceRegionCount
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Check and update the region count for an existing table, to handle scenarios
 * such as restoring a snapshot.
 * @param name name of the table whose region count needs to be checked and updated
 * @param incr the new region count for the table
 * @throws QuotaExceededException if the quota for the number of regions allowed
 *           in a namespace would be exceeded
 * @throws IOException Signals that an I/O exception has occurred.
 */
synchronized void checkAndUpdateNamespaceRegionCount(TableName name, int incr)
    throws IOException {
  String namespace = name.getNamespaceAsString();
  NamespaceDescriptor nspdesc = getNamespaceDescriptor(namespace);
  if (nspdesc != null) {
    NamespaceTableAndRegionInfo currentStatus = getState(namespace);
    int regionCountOfTable = currentStatus.getRegionCountOfTable(name);
    if ((currentStatus.getRegionCount() - regionCountOfTable + incr) > TableNamespaceManager
        .getMaxRegions(nspdesc)) {
      throw new QuotaExceededException("The table " + name.getNameAsString()
          + " region count cannot be updated as it would exceed maximum number "
          + "of regions allowed in the namespace. The total number of regions permitted is "
          + TableNamespaceManager.getMaxRegions(nspdesc));
    }
    currentStatus.removeTable(name);
    currentStatus.addTable(name, incr);
  }
}
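The guard above projects what the namespace's region total would be if this table's contribution were replaced by incr, and rejects the update when that projection exceeds the namespace quota. A minimal sketch of the arithmetic, with hypothetical numbers:
// Hypothetical figures to illustrate the quota check.
int namespaceRegionCount = 48; // current regions across all tables in the namespace
int regionCountOfTable = 8;    // this table's current contribution
int incr = 16;                 // the table's region count after, e.g., a snapshot restore
int maxRegions = 50;           // namespace quota

// Swap the table's old count for the new one, then compare against the quota.
int projected = namespaceRegionCount - regionCountOfTable + incr; // 48 - 8 + 16 = 56
boolean exceedsQuota = projected > maxRegions; // 56 > 50, so QuotaExceededException is thrown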
Example 3: testBulkOutputWithInvalidLabels
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Test
public void testBulkOutputWithInvalidLabels() throws Exception {
  TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles");
  // Prepare the arguments required for the test.
  String[] args =
      new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
          "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
          "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
  // Two data rows: one with a valid label and one with an invalid label.
  String data =
      "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n";
  util.createTable(tableName, FAMILY);
  doMROnTableTest(util, FAMILY, data, args, 1, 2);
  util.deleteTable(tableName);
}
Example 4: testBulkOutputWithTsvImporterTextMapperWithInvalidLabels
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Test
public void testBulkOutputWithTsvImporterTextMapperWithInvalidLabels() throws Exception {
  TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles");
  // Prepare the arguments required for the test.
  String[] args =
      new String[] {
          "-D" + ImportTsv.MAPPER_CONF_KEY
              + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper",
          "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
          "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
          "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
  // Two data rows: one with a valid label and one with an invalid label.
  String data =
      "KEY\u001bVALUE1\u001bVALUE2\u001bprivate\nKEY1\u001bVALUE1\u001bVALUE2\u001binvalid\n";
  util.createTable(tableName, FAMILY);
  doMROnTableTest(util, FAMILY, data, args, 1, 2);
  util.deleteTable(tableName);
}
Example 5: getTableDescriptor
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
static HTableDescriptor getTableDescriptor(final TableName tableName, HConnection connection,
    RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
    int operationTimeout) throws TableNotFoundException, IOException {
  if (tableName == null) return null;
  HTableDescriptor htd = executeCallable(new MasterCallable<HTableDescriptor>(connection) {
    @Override
    public HTableDescriptor call(int callTimeout) throws ServiceException {
      PayloadCarryingRpcController controller = rpcControllerFactory.newController();
      controller.setCallTimeout(callTimeout);
      GetTableDescriptorsResponse htds;
      GetTableDescriptorsRequest req =
          RequestConverter.buildGetTableDescriptorsRequest(tableName);
      htds = master.getTableDescriptors(controller, req);
      if (!htds.getTableSchemaList().isEmpty()) {
        return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
      }
      return null;
    }
  }, rpcCallerFactory, operationTimeout);
  if (htd != null) {
    return htd;
  }
  throw new TableNotFoundException(tableName.getNameAsString());
}
Example 6: getHTableDescriptor
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Connects to the master to get the table descriptor.
 * @param tableName table name
 * @throws IOException if the connection to the master fails or if the table
 *           is not found
 * @deprecated Use {@link Admin#getTableDescriptor(TableName)} instead
 */
@Deprecated
@Override
public HTableDescriptor getHTableDescriptor(final TableName tableName)
    throws IOException {
  if (tableName == null) return null;
  MasterKeepAliveConnection master = getKeepAliveMasterService();
  GetTableDescriptorsResponse htds;
  try {
    GetTableDescriptorsRequest req =
        RequestConverter.buildGetTableDescriptorsRequest(tableName);
    htds = master.getTableDescriptors(null, req);
  } catch (ServiceException se) {
    throw ProtobufUtil.getRemoteException(se);
  } finally {
    master.close();
  }
  if (!htds.getTableSchemaList().isEmpty()) {
    return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
  }
  throw new TableNotFoundException(tableName.getNameAsString());
}
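Since getHTableDescriptor is deprecated, new code is expected to go through the Admin API instead. A minimal, self-contained sketch of that path, assuming an HBase 1.x client on the classpath and a reachable cluster ("mytable" is a placeholder table name):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DescribeTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Throws TableNotFoundException if "mytable" does not exist.
      HTableDescriptor htd = admin.getTableDescriptor(TableName.valueOf("mytable"));
      System.out.println("Descriptor for " + htd.getTableName().getNameAsString() + ": " + htd);
    }
  }
}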
Example 7: TablestoreBufferedMutator
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
public TablestoreBufferedMutator(TablestoreConnection connection, TableName tableName) {
  this.tableName = tableName;
  this.connection = connection;
  writeBuffer = new ConcurrentLinkedQueue<Mutation>();
  // Default write buffer of 2097152 bytes (2 MB), overridable via hbase.client.write.buffer.
  this.writeBufferSize = this.connection.getConfiguration().getLong("hbase.client.write.buffer", 2097152);
  this.currentWriteBufferSize = 0;
  // Column mapping for this table, keyed by its string name.
  this.columnMapping = new ColumnMapping(tableName.getNameAsString(), this.connection.getConfiguration());
  this.adapter = OTSAdapter.getInstance(this.connection.getTablestoreConf());
  this.clearBufferOnFail = true;
}
Example 8: buildBulkFiles
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
private Path buildBulkFiles(TableName table, int value) throws Exception {
  Path dir = util.getDataTestDirOnTestFS(table.getNameAsString());
  Path bulk1 = new Path(dir, table.getNameAsString() + value);
  FileSystem fs = util.getTestFileSystem();
  buildHFiles(fs, bulk1, value);
  return bulk1;
}
Example 9: testMROnTableWithDeletes
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Test
public void testMROnTableWithDeletes() throws Exception {
  TableName tableName = TableName.valueOf("test-" + UUID.randomUUID());
  // Prepare the arguments required for the test.
  String[] args = new String[] {
      "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper",
      "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() };
  String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n";
  util.createTable(tableName, FAMILY);
  doMROnTableTest(util, FAMILY, data, args, 1);
  issueDeleteAndVerifyData(tableName);
  util.deleteTable(tableName);
}
Example 10: doQuarantineTest
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Tests that use this should have a timeout, because this method could potentially wait forever.
 */
private void doQuarantineTest(TableName table, HBaseFsck hbck, int check,
    int corrupt, int fail, int quar, int missing) throws Exception {
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    admin.flush(table); // flush is async
    // Mess it up by leaving a hole in the assignment, meta, and hdfs data.
    admin.disableTable(table);
    String[] args = {"-sidelineCorruptHFiles", "-repairHoles", "-ignorePreCheckPermission",
        table.getNameAsString()};
    HBaseFsck res = hbck.exec(hbfsckExecutorService, args);
    HFileCorruptionChecker hfcc = res.getHFilecorruptionChecker();
    assertEquals(hfcc.getHFilesChecked(), check);
    assertEquals(hfcc.getCorrupted().size(), corrupt);
    assertEquals(hfcc.getFailures().size(), fail);
    assertEquals(hfcc.getQuarantined().size(), quar);
    assertEquals(hfcc.getMissing().size(), missing);
    // It's been fixed; verify that we can enable the table.
    admin.enableTableAsync(table);
    while (!admin.isTableEnabled(table)) {
      try {
        Thread.sleep(250);
      } catch (InterruptedException e) {
        e.printStackTrace();
        fail("Interrupted when trying to enable table " + table);
      }
    }
  } finally {
    cleanupTable(table);
  }
}
Example 11: testGenerateAndLoad
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Test
public void testGenerateAndLoad() throws Exception {
  LOG.info("Running test testGenerateAndLoad.");
  TableName table = TableName.valueOf(NAME + "-" + UUID.randomUUID());
  String cf = "d";
  Path hfiles = new Path(
      util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles");
  String[] args = {
      format("-D%s=%s", ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles),
      format("-D%s=HBASE_ROW_KEY,HBASE_TS_KEY,%s:c1,%s:c2",
          ImportTsv.COLUMNS_CONF_KEY, cf, cf),
      // Configure the test harness to NOT delete the HFiles after they're
      // generated; we need those for doLoadIncrementalHFiles.
      format("-D%s=false", TestImportTsv.DELETE_AFTER_LOAD_CONF),
      table.getNameAsString()
  };
  // Run the job, complete the load.
  util.createTable(table, new String[]{cf});
  Tool t = TestImportTsv.doMROnTableTest(util, cf, simple_tsv, args);
  doLoadIncrementalHFiles(hfiles, table);
  // Validate post-conditions.
  validateDeletedPartitionsFile(t.getConf());
  // Clean up after ourselves.
  util.deleteTable(table);
  util.cleanupDataTestDirOnTestFS(table.getNameAsString());
  LOG.info("testGenerateAndLoad completed successfully.");
}
Example 12: enableTableRep
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * Enable a table's replication switch.
 * @param tableName name of the table
 * @throws IOException if a remote or network exception occurs
 */
public void enableTableRep(final TableName tableName) throws IOException {
  if (tableName == null) {
    throw new IllegalArgumentException("Table name cannot be null");
  }
  try (Admin admin = this.connection.getAdmin()) {
    if (!admin.tableExists(tableName)) {
      throw new TableNotFoundException("Table '" + tableName.getNameAsString()
          + "' does not exist.");
    }
  }
  byte[][] splits = getTableSplitRowKeys(tableName);
  checkAndSyncTableDescToPeers(tableName, splits);
  setTableRep(tableName, true);
}
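In the HBase 1.x client this method lives on ReplicationAdmin. A minimal caller sketch, assuming a replication peer is already configured ("mytable" is a placeholder table name):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;

public class EnableReplication {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ReplicationAdmin is Closeable, so try-with-resources releases its connection.
    try (ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf)) {
      replicationAdmin.enableTableRep(TableName.valueOf("mytable"));
    }
  }
}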
Example 13: relocateRegion
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
@Override
public RegionLocations relocateRegion(final TableName tableName,
    final byte[] row, int replicaId) throws IOException {
  // Since this is an explicit request not to use any caching, finding
  // disabled tables should not be desirable. This ensures that an exception is thrown
  // the first time a disabled table is interacted with.
  if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
    throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
  }
  return locateRegion(tableName, row, false, true, replicaId);
}
Example 14: TableNotFoundException
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
public TableNotFoundException(TableName tableName) {
  super(tableName.getNameAsString());
}
Example 15: TableNotEnabledException
import org.apache.hadoop.hbase.TableName; // import the package/class the method depends on
/**
 * @param tableName Name of table that is not enabled
 */
public TableNotEnabledException(TableName tableName) {
  this(tableName.getNameAsString());
}
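Examples 14 and 15 show the same idiom from the constructor side: the TableName is flattened to its string form so the exception message names the offending table. A minimal sketch of how a caller might surface this (requireKnownTable is a hypothetical helper, not an HBase API):
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;

public class TableCheck {
  // Hypothetical helper: fail fast with the table name in the message.
  static void requireKnownTable(TableName tableName, boolean exists) throws IOException {
    if (!exists) {
      // The constructor stores tableName.getNameAsString() as the message.
      throw new TableNotFoundException(tableName);
    }
  }

  public static void main(String[] args) {
    try {
      requireKnownTable(TableName.valueOf("no_such_table"), false);
    } catch (IOException e) {
      System.out.println("Caught: " + e.getMessage()); // prints "no_such_table"
    }
  }
}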