This article collects typical usage examples of the Java class org.apache.hadoop.hbase.HTableDescriptor. If you are wondering how the HTableDescriptor class is used in practice, what it is for, or what real code that uses it looks like, the hand-picked examples below may help.
The HTableDescriptor class belongs to the org.apache.hadoop.hbase package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
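Before diving into the examples, here is a minimal, self-contained sketch (not taken from the examples below; the table name "demo_table" and the column family "f" are made up) of how an HTableDescriptor is typically built and passed to Admin.createTable on the classic HBase 1.x client API:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class HTableDescriptorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Describe a table named "demo_table" (hypothetical) with a single column family "f".
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
      desc.addFamily(new HColumnDescriptor("f"));
      // Create the table only if it does not already exist.
      if (!admin.tableExists(desc.getTableName())) {
        admin.createTable(desc);
      }
    }
  }
}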
Example 1: addWALEdits
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
private void addWALEdits(final TableName tableName, final HRegionInfo hri, final byte[] rowName,
    final byte[] family, final int count, EnvironmentEdge ee, final WAL wal,
    final HTableDescriptor htd, final MultiVersionConcurrencyControl mvcc) throws IOException {
  String familyStr = Bytes.toString(family);
  long txid = -1;
  for (int j = 0; j < count; j++) {
    byte[] qualifierBytes = Bytes.toBytes(Integer.toString(j));
    byte[] columnBytes = Bytes.toBytes(familyStr + ":" + Integer.toString(j));
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(rowName, family, qualifierBytes, ee.currentTime(), columnBytes));
    // uses WALKey instead of HLogKey on purpose. will only work for tests where we don't care
    // about legacy coprocessors
    txid = wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName,
        ee.currentTime(), mvcc), edit, true);
  }
  if (-1 != txid) {
    wal.sync(txid);
  }
}
Example 2: initialize
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
public void initialize(InputSplit split, Configuration conf) throws IOException {
  this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
  this.split = split;
  HTableDescriptor htd = split.htd;
  HRegionInfo hri = this.split.getRegionInfo();
  FileSystem fs = FSUtils.getCurrentFileSystem(conf);
  // region is immutable, this should be fine,
  // otherwise we have to set the thread read point
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // disable caching of data blocks
  scan.setCacheBlocks(false);
  scanner =
      new ClientSideRegionScanner(conf, fs, new Path(split.restoreDir), htd, hri, scan, null);
}
Example 3: enableTable
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
/**
 * Enable a table, if it exists.
 *
 * @param tableName name of table to enable
 * @throws IOException if a remote or network exception occurs
 */
public void enableTable(TableName tableName) throws IOException {
  HTableDescriptor desc = admin.getTableDescriptor(tableName);
  if (isIndexTable(desc)) {
    throw new TableNotFoundException(tableName);
  }
  IndexTableDescriptor indexDesc = new IndexTableDescriptor(desc);
  if (indexDesc.hasIndex()) {
    for (IndexSpecification indexSpec : indexDesc.getIndexSpecifications()) {
      if (admin.tableExists(indexSpec.getIndexTableName())) {
        admin.enableTable(indexSpec.getIndexTableName());
      } else {
        throw new IndexMissingException(tableName, indexSpec);
      }
    }
  }
  admin.enableTable(tableName);
}
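For context, this method apparently belongs to an index-aware admin wrapper (the admin and isIndexTable members come from its enclosing class, which is not shown here). A hypothetical call site, assuming such a wrapper instance named indexAdmin and a made-up table name, would look like:

// Hypothetical usage; "indexAdmin" and the table name are assumptions, not part of the example above.
TableName tableName = TableName.valueOf("orders");
indexAdmin.enableTable(tableName); // enables the data table and every associated index table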
Example 4: testCreateTwiceWithSameNonce
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
@Test(timeout=60000)
public void testCreateTwiceWithSameNonce() throws Exception {
  final TableName tableName = TableName.valueOf("testCreateTwiceWithSameNonce");
  final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
  final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
  final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
  // create the table
  long procId1 = procExec.submitProcedure(
      new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);
  // create another with the same name
  long procId2 = procExec.submitProcedure(
      new CreateTableProcedure(procExec.getEnvironment(), htd, regions), nonceGroup, nonce);
  ProcedureTestingUtility.waitProcedure(procExec, procId1);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId1));
  ProcedureTestingUtility.waitProcedure(procExec, procId2);
  ProcedureTestingUtility.assertProcNotFailed(procExec.getResult(procId2));
  assertTrue(procId1 == procId2);
}
Example 5: addEdits
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
protected void addEdits(WAL log,
    HRegionInfo hri,
    HTableDescriptor htd,
    int times,
    MultiVersionConcurrencyControl mvcc)
    throws IOException {
  final byte[] row = Bytes.toBytes("row");
  for (int i = 0; i < times; i++) {
    long timestamp = System.currentTimeMillis();
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, row, row, timestamp, row));
    WALKey key = new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(),
        WALKey.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
        HConstants.NO_NONCE, mvcc);
    log.append(htd, hri, key, cols, true);
  }
  log.sync();
}
Example 6: rewriteTableDescriptor
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
private static void rewriteTableDescriptor(final FileSystem fs, final FileStatus status,
    final HTableDescriptor td)
    throws IOException {
  Path tableInfoDir = status.getPath().getParent();
  Path tableDir = tableInfoDir.getParent();
  writeTableDescriptor(fs, td, tableDir, status);
}
Example 7: initHRegion
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
Region initHRegion(TableName tableName, String callingMethod,
    Configuration conf, Class<?>[] implClasses, byte[][] families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // this following piece is a hack.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
    Coprocessor c = host.findCoprocessor(implClass.getName());
    assertNotNull(c);
  }
  // Here we have to call pre and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
Example 8: ClientSideRegionScanner
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
    Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
    throws IOException {
  // region is immutable, set isolation level
  scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
  // open region from the snapshot directory
  this.region = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, null);
  // create an internal region scanner
  this.scanner = region.getScanner(scan);
  values = new ArrayList<Cell>();
  if (scanMetrics == null) {
    initScanMetrics(scan);
  } else {
    this.scanMetrics = scanMetrics;
  }
  region.startRegionOperation();
}
Example 9: testRegionWithFamiliesAndSpecifiedTableName
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
private HRegion testRegionWithFamiliesAndSpecifiedTableName(TableName tableName,
    byte[]... families)
    throws IOException {
  HRegionInfo hRegionInfo = new HRegionInfo(tableName);
  HTableDescriptor hTableDescriptor = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    hTableDescriptor.addFamily(new HColumnDescriptor(family));
  }
  // TODO We need a way to do this without creating files
  return HRegion.createHRegion(hRegionInfo,
      new Path(testFolder.newFolder().toURI()),
      conf,
      hTableDescriptor,
      log);
}
Example 10: handleRegionStartKeyNotEmpty
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
/**
 * This is a special case hole -- when the first region of a table is
 * missing from META, HBase doesn't acknowledge the existence of the
 * table.
 */
@Override
public void handleRegionStartKeyNotEmpty(HbckInfo next) throws IOException {
  errors.reportError(ERROR_CODE.FIRST_REGION_STARTKEY_NOT_EMPTY,
      "First region should start with an empty key. Creating a new " +
      "region and regioninfo in HDFS to plug the hole.",
      getTableInfo(), next);
  HTableDescriptor htd = getTableInfo().getHTD();
  // from special EMPTY_START_ROW to next region's startKey
  HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
      HConstants.EMPTY_START_ROW, next.getStartKey());
  // TODO test
  HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
  LOG.info("Table region start key was not empty. Created new empty region: "
      + newRegion + " " + region);
  fixes++;
}
Example 11: generateHBaseDatasetNullStr
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
public static void generateHBaseDatasetNullStr(Connection conn, Admin admin, TableName tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor("f"));
  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(desc);
  }
  BufferedMutator table = conn.getBufferedMutator(tableName);
  Put p = new Put("a1".getBytes());
  p.addColumn("f".getBytes(), "c1".getBytes(), "".getBytes());
  p.addColumn("f".getBytes(), "c2".getBytes(), "".getBytes());
  p.addColumn("f".getBytes(), "c3".getBytes(), "5".getBytes());
  p.addColumn("f".getBytes(), "c4".getBytes(), "".getBytes());
  table.mutate(p);
  table.close();
}
Example 12: shouldBulkLoadSingleFamilyHLog
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
@Test
public void shouldBulkLoadSingleFamilyHLog() throws IOException {
  when(log.append(any(HTableDescriptor.class), any(HRegionInfo.class),
      any(WALKey.class), argThat(bulkLogWalEditType(WALEdit.BULK_LOAD)),
      any(boolean.class))).thenAnswer(new Answer() {
        public Object answer(InvocationOnMock invocation) {
          WALKey walKey = invocation.getArgumentAt(2, WALKey.class);
          MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
          if (mvcc != null) {
            MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
            walKey.setWriteEntry(we);
          }
          return 01L;
        }
      });
  testRegionWithFamilies(family1).bulkLoadHFiles(withFamilyPathsFor(family1), false, null);
  verify(log).sync(anyLong());
}
Example 13: TestScanRow
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
public TestScanRow() throws IOException, InterruptedException {
  Configuration config = HBaseConfiguration.create();
  Connection connection = ConnectionFactory.createConnection(config);
  familyName = config.get("hbase.client.tablestore.family");
  columnName = "col_1";
  columnValue = "col_1_var";
  TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
  if (!connection.getAdmin().tableExists(tableName)) {
    HTableDescriptor descriptor = new HTableDescriptor(tableName);
    connection.getAdmin().createTable(descriptor);
    TimeUnit.SECONDS.sleep(1);
  }
  table = connection.getTable(tableName);
}
Example 14: writeMarker
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
private static long writeMarker(final WAL wal, final HTableDescriptor htd, final HRegionInfo hri,
    final WALEdit edit, final MultiVersionConcurrencyControl mvcc, final boolean sync)
    throws IOException {
  // TODO: Pass in current time to use?
  WALKey key =
      new HLogKey(hri.getEncodedNameAsBytes(), hri.getTable(), System.currentTimeMillis(), mvcc);
  // Add it to the log but the false specifies that we don't need to add it to the memstore
  long trx = MultiVersionConcurrencyControl.NONE;
  try {
    trx = wal.append(htd, hri, key, edit, false);
    if (sync) wal.sync(trx);
  } finally {
    // If you get hung here, is it a real WAL or a mocked WAL? If the latter, you need to
    // trip the latch that is inside in getWriteEntry up in your mock. See down in the append
    // called from onEvent in FSHLog.
    MultiVersionConcurrencyControl.WriteEntry we = key.getWriteEntry();
    if (mvcc != null && we != null) mvcc.complete(we);
  }
  return trx;
}
Example 15: testDisableTableAndRestart
import org.apache.hadoop.hbase.HTableDescriptor; // import the required package/class
@Test(timeout = 300000)
public void testDisableTableAndRestart() throws Exception {
  final TableName tableName = TableName.valueOf("testDisableTableAndRestart");
  final MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
  final HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILYNAME));
  admin.createTable(desc);
  admin.disableTable(tableName);
  TEST_UTIL.waitTableDisabled(tableName.getName());
  TEST_UTIL.getHBaseCluster().shutdown();
  TEST_UTIL.getHBaseCluster().waitUntilShutDown();
  TEST_UTIL.restartHBaseCluster(2);
  admin.enableTable(tableName);
  TEST_UTIL.waitTableEnabled(tableName);
}