This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.hbase.TableName.valueOf. If you are unsure what TableName.valueOf does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also read further about the enclosing class, org.apache.hadoop.hbase.TableName, and its usage examples.
A total of 15 code examples of TableName.valueOf are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
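Before the examples, here is a minimal sketch of the most common TableName.valueOf overloads; the namespace and table names below ("StudentInfo", "student") are made up for illustration:

import org.apache.hadoop.hbase.TableName;

// Qualifier only: the table lives in the "default" namespace.
TableName t1 = TableName.valueOf("student");
// Namespace and qualifier passed as separate arguments.
TableName t2 = TableName.valueOf("StudentInfo", "student");
// A single "namespace:qualifier" string is parsed the same way.
TableName t3 = TableName.valueOf("StudentInfo:student");

System.out.println(t2.getNameAsString());       // StudentInfo:student
System.out.println(t2.getNamespaceAsString());  // StudentInfo
System.out.println(t2.getQualifierAsString());  // student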
Example 1: testSocketClosed
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Test(expected = RetriesExhaustedException.class)
public void testSocketClosed() throws IOException, InterruptedException {
  String tableName = "testSocketClosed";
  TableName name = TableName.valueOf(tableName);
  UTIL.createTable(name, fam1).close();
  Configuration conf = new Configuration(UTIL.getConfiguration());
  conf.set(RpcClientFactory.CUSTOM_RPC_CLIENT_IMPL_CONF_KEY,
      MyRpcClientImpl.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  Connection connection = ConnectionFactory.createConnection(conf);
  Table table = connection.getTable(TableName.valueOf(tableName));
  table.get(new Get("asd".getBytes()));
  connection.close();
  for (Socket socket : MyRpcClientImpl.savedSockets) {
    assertTrue("Socket + " + socket + " is not closed", socket.isClosed());
  }
}
Example 2: createTable
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
public void createTable() throws Exception {
  HColumnDescriptor family1 = new HColumnDescriptor(firstFamily);
  HColumnDescriptor family2 = new HColumnDescriptor(secondFamily);
  family1.setMaxVersions(3);
  family2.setMaxVersions(3);
  HTableDescriptor descriptor =
      new HTableDescriptor(TableName.valueOf(nameSpaceName + ":" + tableName));
  descriptor.addFamily(family1);
  descriptor.addFamily(family2);
  descriptor.setRegionReplication(3); // region replication
  admin.createTable(descriptor);
  // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("10"));
  // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("20"));
  // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("30"));
  // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("40"));
  // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("50"));
  // admin.split(TableName.valueOf("StudentInfo:student1"), Bytes.toBytes("60"));
}
Example 3: disableTable
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
/**
 * Disable a table.
 *
 * @param tableName the name of the table to disable
 */
public void disableTable(String tableName) {
  HBaseConfiguration hBaseConfiguration = new HBaseConfiguration();
  Admin admin = hBaseConfiguration.admin();
  TableName tn = TableName.valueOf(tableName);
  try {
    if (admin.tableExists(tn)) {
      admin.disableTable(tn);
    } else {
      log.info("Table [" + tableName + "] does not exist");
    }
  } catch (IOException e) {
    e.printStackTrace();
  } finally {
    hBaseConfiguration.close();
  }
}
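Note that the HBaseConfiguration used in Example 3 (with admin() and close() methods) is a project-specific helper rather than the stock org.apache.hadoop.hbase.HBaseConfiguration, which only builds Configuration objects. For comparison, here is a minimal sketch of the same disable flow using only the plain client API, assuming connection settings are picked up from an hbase-site.xml on the classpath:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public void disableTable(String tableName) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  // Connection and Admin are Closeable, so try-with-resources releases them for us.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Admin admin = connection.getAdmin()) {
    TableName tn = TableName.valueOf(tableName);
    if (admin.tableExists(tn) && admin.isTableEnabled(tn)) {
      admin.disableTable(tn);
    }
  }
}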
Example 4: testMissingLastRegion
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
/**
 * This creates and fixes a bad table with a missing last region -- hole in meta and data missing
 * in the fs.
 */
@Test(timeout = 120000)
public void testMissingLastRegion() throws Exception {
  TableName table = TableName.valueOf("testMissingLastRegion");
  try {
    setupTable(table);
    assertEquals(ROWKEYS.length, countRows());
    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    admin.disableTable(table);
    deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("C"), Bytes.toBytes(""), true,
        true, true);
    admin.enableTable(table);
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY });
    // fix the hole
    doFsck(conf, true);
    // check that the hole is fixed
    assertNoErrors(doFsck(conf, false));
  } finally {
    cleanupTable(table);
  }
}
Example 5: shouldBulkLoadManyFamilyHLogEvenWhenTableNameNamespaceSpecified
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Test
public void shouldBulkLoadManyFamilyHLogEvenWhenTableNameNamespaceSpecified() throws IOException {
  when(log.append(any(HTableDescriptor.class), any(HRegionInfo.class),
      any(WALKey.class), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)),
      any(boolean.class))).thenAnswer(new Answer() {
        public Object answer(InvocationOnMock invocation) {
          WALKey walKey = invocation.getArgumentAt(2, WALKey.class);
          MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
          if (mvcc != null) {
            MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
            walKey.setWriteEntry(we);
          }
          return 01L;
        }
      });
  TableName tableName = TableName.valueOf("test", "test");
  testRegionWithFamiliesAndSpecifiedTableName(tableName, family1, family2)
      .bulkLoadHFiles(withFamilyPathsFor(family1, family2), false, null);
  verify(log).sync(anyLong());
}
Example 6: assertSnapshotRequestIsValid
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
/**
 * Check to make sure that the description of the snapshot requested is valid.
 * @param snapshot description of the snapshot
 * @throws IllegalArgumentException if the name of the snapshot or the name of the table to
 *           snapshot are not valid names
 */
public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot)
    throws IllegalArgumentException {
  // make sure the snapshot name is valid
  TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()), true);
  if (snapshot.hasTable()) {
    // make sure the table name is valid; this will implicitly check validity
    TableName tableName = TableName.valueOf(snapshot.getTable());
    if (tableName.isSystemTable()) {
      throw new IllegalArgumentException("System table snapshots are not allowed");
    }
  }
}
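For context, the snapshot argument in Example 6 is a protobuf message. A hedged sketch of invoking the check with a hand-built description (the snapshot and table names here are invented) might look like:

import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder()
    .setName("student_snapshot")        // snapshot name, validated as a table qualifier
    .setTable("StudentInfo:student")    // optional; parsed by TableName.valueOf
    .build();
// Throws IllegalArgumentException for illegal names or system tables such as hbase:meta.
assertSnapshotRequestIsValid(snapshot);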
Example 7: testBasicHalfMapFile
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
/**
 * Write a file and then assert that we can read from top and bottom halves
 * using two HalfMapFiles.
 * @throws Exception
 */
public void testBasicHalfMapFile() throws Exception {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testBasicHalfMapFileTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
      conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(2 * 1024).build();
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);
  Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf, BloomType.NONE);
  checkHalfHFile(regionFs, sf);
}
Example 8: testScanForSuperUserWithFewerLabelAuths
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Test
public void testScanForSuperUserWithFewerLabelAuths() throws Throwable {
  String[] auths = { SECRET };
  String user = "admin";
  try (Connection conn = ConnectionFactory.createConnection(conf)) {
    VisibilityClient.setAuths(conn, auths, user);
  }
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName,
      SECRET + "&" + CONFIDENTIAL + "&!" + PRIVATE, SECRET + "&!" + PRIVATE);
  PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
    public Void run() throws Exception {
      Scan s = new Scan();
      s.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Table t = connection.getTable(table.getName())) {
        ResultScanner scanner = t.getScanner(s);
        Result[] result = scanner.next(5);
        assertTrue(result.length == 2);
      }
      return null;
    }
  };
  SUPERUSER.runAs(scanAction);
}
Example 9: testVisibilityLabelsInScanThatDoesNotMatchAnyDefinedLabels
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Test
public void testVisibilityLabelsInScanThatDoesNotMatchAnyDefinedLabels() throws Exception {
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  try (Table table = createTableAndWriteDataWithLabels(tableName,
      "(" + SECRET + "|" + CONFIDENTIAL + ")", PRIVATE)) {
    Scan s = new Scan();
    s.setAuthorizations(new Authorizations("SAMPLE"));
    ResultScanner scanner = table.getScanner(s);
    Result[] next = scanner.next(3);
    assertTrue(next.length == 0);
  }
}
Example 10: testEnableReplicationForExplicitSetTableCfs
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Test(timeout = 300000)
public void testEnableReplicationForExplicitSetTableCfs() throws Exception {
  TableName tn = TableName.valueOf("testEnableReplicationForSetTableCfs");
  String peerId = "2";
  if (admin2.isTableAvailable(tableName)) {
    admin2.disableTable(tableName);
    admin2.deleteTable(tableName);
  }
  assertFalse("Table should not exist in the peer cluster", admin2.isTableAvailable(tableName));
  Map<TableName, ? extends Collection<String>> tableCfs =
      new HashMap<TableName, Collection<String>>();
  tableCfs.put(tn, null);
  try {
    adminExt.setPeerTableCFs(peerId, tableCfs);
    adminExt.enableTableRep(tableName);
    assertFalse("Table should not be created if user has set table cfs explicitly for the "
        + "peer and this is not part of that collection",
        admin2.isTableAvailable(tableName));
    tableCfs.put(tableName, null);
    adminExt.setPeerTableCFs(peerId, tableCfs);
    adminExt.enableTableRep(tableName);
    assertTrue(
        "Table should be created if user has explicitly added table into table cfs collection",
        admin2.isTableAvailable(tableName));
  } finally {
    adminExt.removePeerTableCFs(peerId, adminExt.getPeerTableCFs(peerId));
    adminExt.disableTableRep(tableName);
  }
}
Example 11: testEnableDisableRemove
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
/**
 * Test that Constraints are properly enabled, disabled, and removed.
 *
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test
public void testEnableDisableRemove() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("table"));
  // check general enabling/disabling of constraints
  // first add a constraint
  Constraints.add(desc, AllPassConstraint.class);
  // make sure everything is enabled
  assertTrue(Constraints.enabled(desc, AllPassConstraint.class));
  assertTrue(desc.hasCoprocessor(ConstraintProcessor.class.getName()));
  // check disabling
  Constraints.disable(desc);
  assertFalse(desc.hasCoprocessor(ConstraintProcessor.class.getName()));
  // make sure the added constraints are still present
  assertTrue(Constraints.enabled(desc, AllPassConstraint.class));
  // check just removing the single constraint
  Constraints.remove(desc, AllPassConstraint.class);
  assertFalse(Constraints.has(desc, AllPassConstraint.class));
  // add back the single constraint
  Constraints.add(desc, AllPassConstraint.class);
  // and now check that when we remove constraints, all are gone
  Constraints.remove(desc);
  assertFalse(desc.hasCoprocessor(ConstraintProcessor.class.getName()));
  assertFalse(Constraints.has(desc, AllPassConstraint.class));
}
Example 12: testGetTableDescriptor
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Test(timeout = 300000)
public void testGetTableDescriptor() throws IOException {
  HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
  HColumnDescriptor fam2 = new HColumnDescriptor("fam2");
  HColumnDescriptor fam3 = new HColumnDescriptor("fam3");
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("myTestTable"));
  htd.addFamily(fam1);
  htd.addFamily(fam2);
  htd.addFamily(fam3);
  this.admin.createTable(htd);
  Table table = new HTable(TEST_UTIL.getConfiguration(), htd.getTableName());
  HTableDescriptor confirmedHtd = table.getTableDescriptor();
  assertEquals(htd.compareTo(confirmedHtd), 0);
  table.close();
}
Example 13: testTableNotFoundExceptionWithoutAnyTables
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
/**
 * For HADOOP-2579
 * @throws IOException
 */
@Test(expected = TableNotFoundException.class, timeout = 300000)
public void testTableNotFoundExceptionWithoutAnyTables() throws IOException {
  TableName tableName = TableName.valueOf("testTableNotFoundExceptionWithoutAnyTables");
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  ht.get(new Get("e".getBytes()));
}
Example 14: initialize
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Override
protected void initialize(JobContext context) throws IOException {
  // Do we have to worry about mis-matches between the Configuration from setConf and the one
  // in this context?
  TableName tableName = TableName.valueOf(conf.get(INPUT_TABLE));
  try {
    initializeTable(ConnectionFactory.createConnection(new Configuration(conf)), tableName);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
  }
}
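Example 14 is the initialize hook of a MapReduce input format; the INPUT_TABLE key it reads is normally set by the job driver before submission. Assuming it is the standard TableInputFormat constant, the driver side could look roughly like this sketch (the table name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

Configuration conf = HBaseConfiguration.create();
// "hbase.mapreduce.inputtable" -- the value that initialize() above turns into a
// TableName via TableName.valueOf(conf.get(INPUT_TABLE)).
conf.set(TableInputFormat.INPUT_TABLE, "StudentInfo:student"); // illustrative table name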
Example 15: testRecoveryAndDoubleExecutionOnline
import org.apache.hadoop.hbase.TableName; // import the package/class this method depends on
@Test(timeout = 60000)
public void testRecoveryAndDoubleExecutionOnline() throws Exception {
  final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecutionOnline");
  final String cf4 = "cf4";
  final HColumnDescriptor columnDescriptor = new HColumnDescriptor(cf4);
  int oldBlockSize = columnDescriptor.getBlocksize();
  int newBlockSize = 4 * oldBlockSize;
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  // create the table
  MasterProcedureTestingUtility.createTable(procExec, tableName, null, "f1", "f2", cf4);
  ProcedureTestingUtility.waitNoProcedureRunning(procExec);
  ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
  // Start the Modify procedure && kill the executor
  columnDescriptor.setBlocksize(newBlockSize);
  long procId = procExec.submitProcedure(
      new ModifyColumnFamilyProcedure(procExec.getEnvironment(), tableName, columnDescriptor),
      nonceGroup,
      nonce);
  // Restart the executor and execute the step twice
  int numberOfSteps = ModifyColumnFamilyState.values().length;
  MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps,
      ModifyColumnFamilyState.values());
  MasterProcedureTestingUtility.validateColumnFamilyModification(UTIL.getHBaseCluster()
      .getMaster(), tableName, cf4, columnDescriptor);
}