This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Connection.getTable. If you are wondering what Connection.getTable does, how to use it, or where to find real-world examples, the curated snippets below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.hbase.client.Connection.
Fifteen code examples of Connection.getTable are shown below, ordered by popularity by default.
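Before the examples, here is a minimal, self-contained sketch of the basic pattern. The table name "mytable", column family "cf", and the row/values are placeholders, not taken from the examples below. A Connection is heavyweight and thread-safe, so it is created once and shared; the Table handles returned by getTable are lightweight and should be obtained, used, and closed per task.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Connection is heavyweight and thread-safe: create once, share across threads.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        // Table is lightweight and not thread-safe: get one per task and close it promptly.
        Table table = connection.getTable(TableName.valueOf("mytable"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    }
  }
}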
Example 1: obtainToken
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Obtain and return an authentication token for the current user.
* @param conn The HBase cluster connection
* @return the authentication token instance
*/
public static Token<AuthenticationTokenIdentifier> obtainToken(
Connection conn) throws IOException {
Table meta = null;
try {
meta = conn.getTable(TableName.META_TABLE_NAME);
CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
AuthenticationProtos.AuthenticationService.BlockingInterface service =
AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
return ProtobufUtil.toToken(response.getToken());
} catch (ServiceException se) {
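// ProtobufUtil.toIOException unwraps the ServiceException and always rethrows it as an
// IOException, so the "dummy" return below is never reached at runtime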
ProtobufUtil.toIOException(se);
} finally {
if (meta != null) {
meta.close();
}
}
// dummy return for ServiceException block
return null;
}
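As a usage note, here is a hedged sketch of how the returned token is typically consumed; the existing Configuration (conf) and the secure-cluster setup are assumptions, not part of the example above.

// Assumes a secure cluster with token authentication enabled and an existing conf.
User user = User.getCurrent();
try (Connection conn = ConnectionFactory.createConnection(conf)) {
  Token<AuthenticationTokenIdentifier> token = obtainToken(conn);
  user.addToken(token); // cache the token in the user's credentials for later RPCs
}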
Example 2: fixMetaHoleOnlineAndAddReplicas
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Puts the specified HRegionInfo into hbase:meta with replica-related columns.
*/
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf,
HRegionInfo hri, Collection<ServerName> servers, int numReplicas) throws IOException {
  // try-with-resources closes the connection and table even if the put fails
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
    Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
    if (numReplicas > 1) {
      Random r = new Random();
      ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
      for (int i = 1; i < numReplicas; i++) {
        ServerName sn = serversArr[r.nextInt(serversArr.length)];
        // the column added here is just to make sure the master is able to
        // see the additional replicas when it is asked to assign. The
        // final value of these columns will be different and will be updated
        // by the actual regionservers that start hosting the respective replicas
        MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, i);
      }
    }
    meta.put(put);
  }
}
Example 3: batch
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Applies the given batches of row mutations to the table, creating the shared
* cluster connection on first use.
* @param tableName table to apply the changes to
* @param allRows list of actions
* @throws IOException
*/
protected void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
if (allRows.isEmpty()) {
return;
}
Table table = null;
try {
// See https://en.wikipedia.org/wiki/Double-checked_locking
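// note: the sharedHtableCon field must be declared volatile for this
// double-checked locking to be safe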
Connection connection = this.sharedHtableCon;
if (connection == null) {
synchronized (sharedHtableConLock) {
connection = this.sharedHtableCon;
if (connection == null) {
connection = this.sharedHtableCon = ConnectionFactory.createConnection(this.conf);
}
}
}
table = connection.getTable(tableName);
for (List<Row> rows : allRows) {
table.batch(rows);
}
} catch (InterruptedException ix) {
throw (InterruptedIOException)new InterruptedIOException().initCause(ix);
} finally {
if (table != null) {
table.close();
}
}
}
Example 4: initializeTable
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Allows subclasses to initialize the table information.
*
* @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
* @param tableName The {@link TableName} of the table to process.
* @throws IOException
*/
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
if (this.table != null || this.connection != null) {
LOG.warn("initializeTable called multiple times. Overwriting connection and table " +
"reference; TableInputFormatBase will not close these old references when done.");
}
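// note: this older variant casts to the concrete HTable class; the newer
// variant in Example 6 keeps the Table interface instead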
this.table = (HTable) connection.getTable(tableName);
this.connection = connection;
}
Example 5: LocalScannerInParallel
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
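/**
 * Builds a parallel local scanner: opens the data table, collects all of its
 * region locations, and pre-creates up to MAX_SCANNER_SIZE scanners over them.
 */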
public LocalScannerInParallel(Connection conn, IndexTableRelation relation, Scan scan) throws IOException {
super(conn, relation, scan);
table = conn.getTable(relation.getTableName());
RegionLocator locator = conn.getRegionLocator(relation.getTableName());
regionLocationQueue = new LinkedList<>(locator.getAllRegionLocations());
addNewScanner(true);
for (int i = 1; i < MAX_SCANNER_SIZE; ++i) {
addNewScanner(false);
}
}
Example 6: initializeTable
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Allows subclasses to initialize the table information.
*
* @param connection The {@link Connection} to the HBase cluster. MUST be unmanaged. We will close.
* @param tableName The {@link TableName} of the table to process.
* @throws IOException
*/
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
if (this.table != null || this.connection != null) {
LOG.warn("initializeTable called multiple times. Overwriting connection and table " +
"reference; TableInputFormatBase will not close these old references when done.");
}
this.table = connection.getTable(tableName);
this.regionLocator = connection.getRegionLocator(tableName);
this.admin = connection.getAdmin();
this.connection = connection;
}
Example 7: grant
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Grants global permissions to the specified user.
* @param connection The Connection instance to use
* @param userName the user to grant permissions to
* @param actions the global actions to grant
*/
public static void grant(final Connection connection, final String userName,
final Permission.Action... actions) throws Throwable {
PayloadCarryingRpcController controller
= ((ClusterConnection) connection).getRpcControllerFactory().newController();
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
ProtobufUtil.grant(controller, getAccessControlServiceStub(table), userName, actions);
}
}
Example 8: revoke
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Revokes the permission on the namespace for the specified user.
* @param connection The Connection instance to use
* @param namespace the namespace to revoke the permission from
* @param userName the user whose permission is being revoked
* @param actions the actions to revoke
* @throws Throwable
*/
public static void revoke(final Connection connection, final String namespace,
final String userName, final Permission.Action... actions) throws Throwable {
PayloadCarryingRpcController controller
= ((ClusterConnection) connection).getRpcControllerFactory().newController();
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
ProtobufUtil.revoke(controller, getAccessControlServiceStub(table), userName, namespace,
actions);
}
}
Example 9: testRegionReplicaReplicationForFlushAndCompaction
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
@Test (timeout = 240000)
public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception {
// Tests a table with region replication 3. Writes some data, and causes flushes and
// compactions. Verifies that the data is readable from the replicas. Note that this
// does not test whether the replicas actually pick up flushed files and apply compaction
// to their stores
int regionReplication = 3;
TableName tableName = TableName.valueOf("testRegionReplicaReplicationForFlushAndCompaction");
HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
htd.setRegionReplication(regionReplication);
HTU.getHBaseAdmin().createTable(htd);
Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
Table table = connection.getTable(tableName);
try {
// load the data to the table
for (int i = 0; i < 6000; i += 1000) {
LOG.info("Writing data from " + i + " to " + (i+1000));
HTU.loadNumericRows(table, HBaseTestingUtility.fam1, i, i+1000);
LOG.info("flushing table");
HTU.flush(tableName);
LOG.info("compacting table");
HTU.compact(tableName, false);
}
verifyReplication(tableName, regionReplication, 0, 1000);
} finally {
table.close();
connection.close();
}
}
Example 10: populateTable
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Populate table with known values.
*/
private void populateTable(final Connection connection, TableName table, int value)
throws Exception {
// create HFiles for different column families
LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
Path bulk1 = buildBulkFiles(table, value);
try (Table t = connection.getTable(table)) {
lih.doBulkLoad(bulk1, (HTable)t);
}
}
Example 11: setOrClearAuths
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
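/**
 * Sets (setOrClear == true) or clears (setOrClear == false) the given visibility
 * authorizations for a user by invoking the VisibilityLabelsService coprocessor
 * on the labels table.
 */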
private static VisibilityLabelsResponse setOrClearAuths(Connection connection,
final String[] auths, final String user, final boolean setOrClear)
throws IOException, ServiceException, Throwable {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable =
new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
ServerRpcController controller = new ServerRpcController();
BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback =
new BlockingRpcCallback<VisibilityLabelsResponse>();
public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException {
SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder();
setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user)));
for (String auth : auths) {
if (auth.length() > 0) {
setAuthReqBuilder.addAuth(ByteStringer.wrap(Bytes.toBytes(auth)));
}
}
if (setOrClear) {
service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback);
} else {
service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback);
}
VisibilityLabelsResponse response = rpcCallback.get();
if (controller.failedOnException()) {
throw controller.getFailedOn();
}
return response;
}
};
Map<byte[], VisibilityLabelsResponse> result = table.coprocessorService(
VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
callable);
return result.values().iterator().next(); // There will be exactly one region for labels
// table and so one entry in result Map.
}
}
Example 12: getUserPermissions
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Lists all the UserPermissions matching the given pattern.
* @param connection The Connection instance to use
* @param tableRegex The regular expression string to match against
* @return a List of the matching UserPermissions
* @throws Throwable
*/
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
throws Throwable {
PayloadCarryingRpcController controller
= ((ClusterConnection) connection).getRpcControllerFactory().newController();
List<UserPermission> permList = new ArrayList<UserPermission>();
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
try (Admin admin = connection.getAdmin()) {
CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW);
BlockingInterface protocol =
AccessControlProtos.AccessControlService.newBlockingStub(service);
HTableDescriptor[] htds = null;
if (tableRegex == null || tableRegex.isEmpty()) {
permList = ProtobufUtil.getUserPermissions(controller, protocol);
} else if (tableRegex.charAt(0) == '@') {
String namespace = tableRegex.substring(1);
permList = ProtobufUtil.getUserPermissions(controller, protocol,
Bytes.toBytes(namespace));
} else {
htds = admin.listTables(Pattern.compile(tableRegex), true);
for (HTableDescriptor hd : htds) {
permList.addAll(ProtobufUtil.getUserPermissions(controller, protocol,
hd.getTableName()));
}
}
}
}
return permList;
}
Example 13: createRowkeyQueueBySecondaryIndex
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Scans all index tables; rowkeys common to every index scan are collected into rowkeySet.
* This can be optimized in two ways (a sketch of the second follows this method):
* 1. scan index tables by the order of #CandidateRowkeys, similar to CCIndex
* 2. scan index tables in parallel
*
* @throws IOException
*/
public static Queue<byte[]> createRowkeyQueueBySecondaryIndex(Connection conn,
IndexTableRelation relation, Map<byte[], NavigableSet<byte[]>> familyMap,
ScanRange.ScanRangeList rangeList, Scan rawScan) throws IOException {
TreeSet<byte[]> rowkeySet = null;
long timeToMerge = 0;
for (ScanRange range : rangeList.getRanges()) {
Scan scan = new Scan();
scan.setStartRow(range.getStart());
scan.setStopRow(range.getStop());
scan.setFamilyMap(familyMap);
scan.setCaching(rawScan.getCaching());
scan.setCacheBlocks(rawScan.getCacheBlocks());
scan.setId(rawScan.getId());
if (range.getStartTs() != -1 && range.getStopTs() != -1) {
scan.setTimeRange(range.getStartTs(), range.getStopTs());
}
TableName tableName = relation.getIndexTableName(range.getFamily(), range.getQualifier());
Table table = conn.getTable(tableName);
ResultScanner scanner = table.getScanner(scan);
Result res;
long timeStart = System.currentTimeMillis();
TreeSet<byte[]> candidateSet = new TreeSet<>(Bytes.BYTES_COMPARATOR);
      while ((res = scanner.next()) != null) {
        candidateSet.add(IndexPutParser.parseIndexRowKey(res.getRow())[0]);
      }
      // close per-range resources so scanners and tables are not leaked across iterations
      scanner.close();
      table.close();
System.out.println(String
.format("get %d candidate rowkeys from %s in scan %s, cost %.2f seconds",
candidateSet.size(), range.toString(), scan.getId(),
(System.currentTimeMillis() - timeStart) / 1000.0));
if (rowkeySet == null) {
rowkeySet = candidateSet;
} else {
timeStart = System.currentTimeMillis();
rowkeySet = getCommonSet(rowkeySet, candidateSet);
timeToMerge += (System.currentTimeMillis() - timeStart);
}
System.out.println(
"common key set size " + rowkeySet.size() + " after " + range + " in scan " + scan
.getId());
      if (rowkeySet.isEmpty()) { // no common keys at all; the remaining index tables can be skipped
break;
}
}
System.out.println(String
.format("get %d result rowkeys in scan %s, cost %.2f seconds", rowkeySet.size(),
rawScan.getId(), timeToMerge / 1000.0));
if (rowkeySet != null && !rowkeySet.isEmpty()) {
Queue<byte[]> rowkeyQueue = new LinkedList<>();
for (byte[] rowkey : rowkeySet)
rowkeyQueue.add(rowkey);
return rowkeyQueue;
}
return null;
}
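The javadoc above names two possible optimizations; below is a rough sketch of the second one, scanning the index tables in parallel. The helper scanOneRange is hypothetical: it stands for the per-range scan-and-collect loop factored out of the method above, and the sketch assumes Java 8 and java.util.concurrent.

// Sketch of optimization 2: run the per-range index scans concurrently.
// scanOneRange(...) is a hypothetical helper wrapping the scan loop above.
ExecutorService pool = Executors.newFixedThreadPool(Math.max(1, rangeList.getRanges().size()));
try {
  List<Future<TreeSet<byte[]>>> futures = new ArrayList<>();
  for (ScanRange range : rangeList.getRanges()) {
    futures.add(pool.submit(() -> scanOneRange(conn, relation, familyMap, rawScan, range)));
  }
  TreeSet<byte[]> rowkeySet = null;
  for (Future<TreeSet<byte[]>> future : futures) {
    TreeSet<byte[]> candidateSet = future.get();
    rowkeySet = (rowkeySet == null) ? candidateSet : getCommonSet(rowkeySet, candidateSet);
    if (rowkeySet.isEmpty()) {
      break; // no common keys remain; the other scan results are irrelevant
    }
  }
  return rowkeySet;
} catch (InterruptedException | ExecutionException e) {
  throw new IOException(e);
} finally {
  pool.shutdownNow();
}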
Example 14: doDelete
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
private static void doDelete(final Connection connection, final Delete delete)
throws IOException {
try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
table.delete(delete);
}
}
Example 15: doGet
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
protected static Result doGet(final Connection connection, final Get get) throws IOException {
try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
return table.get(get);
}
}