This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Connection.getRegionLocator. If you are wondering what Connection.getRegionLocator does, how to call it, or want concrete examples, the curated code samples below may help. You can also explore further usage of the enclosing class, org.apache.hadoop.hbase.client.Connection.
The section below presents 14 code examples of Connection.getRegionLocator, sorted by popularity by default.
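In a nutshell, Connection.getRegionLocator(TableName) returns a RegionLocator for inspecting where a table's regions live, and the locator should be closed after use. Before the collected examples, here is a minimal sketch of the core pattern, assuming an HBase 1.x-style client; the table name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocatorQuickstart {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Both the connection and the locator are Closeable; try-with-resources
    // handles cleanup. "my_table" is a hypothetical table name.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(TableName.valueOf("my_table"))) {
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegionInfo().getRegionNameAsString()
            + " -> " + location.getServerName());
      }
    }
  }
}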
Example 1: init
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
private void init() {
  logger.debug("Getting region locations");
  TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
  Connection conn = storagePlugin.getConnection();

  try (Admin admin = conn.getAdmin();
       RegionLocator locator = conn.getRegionLocator(tableName)) {
    this.hTableDesc = admin.getTableDescriptor(tableName);
    List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
    statsCalculator = new TableStatsCalculator(conn, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);

    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<>();
    for (HRegionLocation regionLocation : regionLocations) {
      HRegionInfo regionInfo = regionLocation.getRegionInfo();
      // Skip regions that precede the scan's start row.
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, regionLocation.getServerName());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      // Stop once the region containing the stop row has been added.
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0 && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }
  } catch (IOException e) {
    throw new RuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
Example 2: initializeTable
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Allows subclasses to initialize the table information.
*
* @param connection The {@link Connection} to the HBase cluster. MUST be unmanaged. We will close.
* @param tableName The {@link TableName} of the table to process.
* @throws IOException
*/
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
  if (this.table != null || this.connection != null) {
    LOG.warn("initializeTable called multiple times. Overwriting connection and table " +
        "reference; TableInputFormatBase will not close these old references when done.");
  }
  this.table = connection.getTable(tableName);
  this.regionLocator = connection.getRegionLocator(tableName);
  this.admin = connection.getAdmin();
  this.connection = connection;
}
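Because the contract above demands an unmanaged connection that the input format will close, a call site typically creates the connection itself via ConnectionFactory and hands over ownership. A minimal sketch of such a subclass; the setup hook and table name are hypothetical, not part of the HBase API:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;

public class MyTableInputFormat extends TableInputFormatBase {
  // Hypothetical hook; call once before the job starts reading splits.
  public void setup(Configuration conf) throws IOException {
    // ConnectionFactory returns an unmanaged connection, satisfying the
    // contract above; the input format takes ownership and closes it.
    Connection connection = ConnectionFactory.createConnection(conf);
    initializeTable(connection, TableName.valueOf("my_table")); // hypothetical table
  }
}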
Example 3: doBulkLoad
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
* Perform a bulk load of the given directory into the given
* pre-existing table. This method is not threadsafe.
*
* @param hfofDir the directory that was provided as the output path
* of a job using HFileOutputFormat
* @param table the table to load into
* @throws TableNotFoundException if table does not yet exist
*/
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table)
    throws TableNotFoundException, IOException {
  Admin admin = null;
  Table t = table;
  Connection conn = table.getConnection();
  boolean closeConnWhenFinished = false;
  try {
    if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
      LOG.warn("managed connection cannot be used for bulkload. Creating unmanaged connection.");
      // can only use unmanaged connections from here on out.
      conn = ConnectionFactory.createConnection(table.getConfiguration());
      t = conn.getTable(table.getName());
      closeConnWhenFinished = true;
      if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
        throw new RuntimeException("Failed to create unmanaged connection.");
      }
      admin = conn.getAdmin();
    } else {
      admin = conn.getAdmin();
    }
    try (RegionLocator rl = conn.getRegionLocator(t.getName())) {
      doBulkLoad(hfofDir, admin, t, rl);
    }
  } finally {
    if (admin != null) admin.close();
    if (closeConnWhenFinished) {
      t.close();
      conn.close();
    }
  }
}
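For callers, a bulk load boils down to pairing an HFile output directory with an open table. A minimal usage sketch, assuming the HBase 1.x client where this method lives on LoadIncrementalHFiles and the deprecated HTable constructor is still present; the path and table name are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Deprecated constructor, still available in the 1.x client.
    try (HTable table = new HTable(conf, TableName.valueOf("my_table"))) {
      new LoadIncrementalHFiles(conf).doBulkLoad(new Path("/tmp/hfiles"), table);
    }
  }
}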
Example 4: getRegionCount
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
 * Count regions in <code>hbase:meta</code> for the passed table.
 * @param connection Connection object
 * @param tableName table name to count regions for
 * @return Count of regions in table <code>tableName</code>
 * @throws IOException
 */
public static int getRegionCount(final Connection connection, final TableName tableName)
    throws IOException {
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    List<HRegionLocation> locations = locator.getAllRegionLocations();
    return locations == null ? 0 : locations.size();
  }
}
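A minimal sketch of calling the helper above; the table name is hypothetical and the caller owns the connection:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RegionCountExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // getRegionCount is the static helper shown above, assumed in scope
      // or statically imported; "my_table" is a hypothetical table name.
      int regions = getRegionCount(connection, TableName.valueOf("my_table"));
      System.out.println("my_table spans " + regions + " region(s)");
    }
  }
}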
Example 5: LocalScanner
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
public LocalScanner(Connection conn, IndexTableRelation relation, Scan scan) throws IOException {
  super(conn, relation, scan);
  table = conn.getTable(relation.getTableName());
  RegionLocator locator = conn.getRegionLocator(relation.getTableName());
  regionLocationQueue = new LinkedList<>(locator.getAllRegionLocations());
  INIT_REGION_SIZE = regionLocationQueue.size();
  currentScanner = getNextScanner();
}
Example 6: IRScannerInParallel
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
public IRScannerInParallel(Connection conn, IndexTableRelation relation, Scan scan) throws IOException {
  super(conn, relation, scan);
  table = conn.getTable(relation.getTableName());
  RegionLocator locator = conn.getRegionLocator(relation.getTableName());
  regionLocationQueue = new LinkedList<>(locator.getAllRegionLocations());
  INIT_REGION_SIZE = regionLocationQueue.size();
  addNewScanner(true);
  for (int i = 1; i < MAX_SCANNER_SIZE; ++i) {
    addNewScanner(false);
  }
}
Example 7: IRScanner
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
public IRScanner(Connection conn, IndexTableRelation relation, Scan scan) throws IOException {
  super(conn, relation, scan);
  table = conn.getTable(relation.getTableName());
  RegionLocator locator = conn.getRegionLocator(relation.getTableName());
  regionLocationQueue = new LinkedList<>(locator.getAllRegionLocations());
  INIT_REGION_SIZE = regionLocationQueue.size();
  currentScanner = getNextScanner();
}
Example 8: LocalScannerInParallel
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
public LocalScannerInParallel(Connection conn, IndexTableRelation relation, Scan scan) throws IOException {
  super(conn, relation, scan);
  table = conn.getTable(relation.getTableName());
  RegionLocator locator = conn.getRegionLocator(relation.getTableName());
  regionLocationQueue = new LinkedList<>(locator.getAllRegionLocations());
  addNewScanner(true);
  for (int i = 1; i < MAX_SCANNER_SIZE; ++i) {
    addNewScanner(false);
  }
}
Example 9: countCoveringRegions
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
 * Count the regions covering the key range [start, end]; used by the clustering index.
 *
 * @param conn connection used to look up region locations
 * @param tableName table whose regions are inspected
 * @param start start key of the range, or null to start from the first region
 * @param end end key of the range, or null to end at the last region
 * @return number of regions covering the range
 * @throws IOException
 */
protected static int countCoveringRegions(Connection conn, TableName tableName, byte[] start,
    byte[] end) throws IOException {
  // Close the locator when done.
  try (RegionLocator locator = conn.getRegionLocator(tableName)) {
    List<HRegionLocation> list = locator.getAllRegionLocations();
    localTest(list);
    int left = start == null ? 0 : lookupRegionIndex(list, start);
    int right = end == null ? list.size() - 1 : lookupRegionIndex(list, end);
    return right - left + 1;
  }
}
Example 10: warmUpConnectionCache
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
private void warmUpConnectionCache(Connection connection, TableName tn) throws IOException {
  try (RegionLocator locator = connection.getRegionLocator(tn)) {
    LOG.info(
        "Warmed up region location cache for " + tn
            + " got " + locator.getAllRegionLocations().size());
  }
}
Example 11: reOpenAllRegions
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
public boolean reOpenAllRegions(List<HRegionInfo> regions) throws IOException {
  boolean done = false;
  LOG.info("Bucketing regions by region server...");
  List<HRegionLocation> regionLocations = null;
  Connection connection = this.masterServices.getConnection();
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    regionLocations = locator.getAllRegionLocations();
  }

  // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
  NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
  for (HRegionLocation location : regionLocations) {
    hri2Sn.put(location.getRegionInfo(), location.getServerName());
  }

  TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
  List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
  for (HRegionInfo hri : regions) {
    ServerName sn = hri2Sn.get(hri);
    // Skip the offlined split parent region.
    // See HBASE-4578 for more information.
    if (null == sn) {
      LOG.info("Skip " + hri);
      continue;
    }
    if (!serverToRegions.containsKey(sn)) {
      LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
      serverToRegions.put(sn, hriList);
    }
    reRegions.add(hri);
    serverToRegions.get(sn).add(hri);
  }

  LOG.info("Reopening " + reRegions.size() + " regions on "
      + serverToRegions.size() + " region servers.");
  this.masterServices.getAssignmentManager().setRegionsToReopen(reRegions);

  BulkReOpen bulkReopen = new BulkReOpen(this.server, serverToRegions,
      this.masterServices.getAssignmentManager());
  while (true) {
    try {
      if (bulkReopen.bulkReOpen()) {
        done = true;
        break;
      } else {
        LOG.warn("Timeout before reopening all regions");
      }
    } catch (InterruptedException e) {
      LOG.warn("Reopen was interrupted");
      // Preserve the interrupt.
      Thread.currentThread().interrupt();
      break;
    }
  }
  return done;
}
Example 12: reOpenAllRegions
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
/**
 * Reopen all regions from a table after a schema change operation.
 */
public static boolean reOpenAllRegions(
    final MasterProcedureEnv env,
    final TableName tableName,
    final List<HRegionInfo> regionInfoList) throws IOException {
  boolean done = false;
  LOG.info("Bucketing regions by region server...");
  List<HRegionLocation> regionLocations = null;
  Connection connection = env.getMasterServices().getConnection();
  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
    regionLocations = locator.getAllRegionLocations();
  }

  // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
  NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
  for (HRegionLocation location : regionLocations) {
    hri2Sn.put(location.getRegionInfo(), location.getServerName());
  }

  TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
  List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
  for (HRegionInfo hri : regionInfoList) {
    ServerName sn = hri2Sn.get(hri);
    // Skip the offlined split parent region.
    // See HBASE-4578 for more information.
    if (null == sn) {
      LOG.info("Skip " + hri);
      continue;
    }
    if (!serverToRegions.containsKey(sn)) {
      LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
      serverToRegions.put(sn, hriList);
    }
    reRegions.add(hri);
    serverToRegions.get(sn).add(hri);
  }

  LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size()
      + " region servers.");
  AssignmentManager am = env.getMasterServices().getAssignmentManager();
  am.setRegionsToReopen(reRegions);

  BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am);
  while (true) {
    try {
      if (bulkReopen.bulkReOpen()) {
        done = true;
        break;
      } else {
        LOG.warn("Timeout before reopening all regions");
      }
    } catch (InterruptedException e) {
      LOG.warn("Reopen was interrupted");
      // Preserve the interrupt.
      Thread.currentThread().interrupt();
      break;
    }
  }
  return done;
}
Example 13: testRegionReplicaReplicationIgnoresDisabledTables
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
public void testRegionReplicaReplicationIgnoresDisabledTables(boolean dropTable)
    throws Exception {
  // Tests that edits from a disabled or dropped table are handled correctly by skipping those
  // entries, and that edits arriving after the dropped/disabled table's edits can still be
  // replicated without problems.
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationIgnoresDisabledTables"
      + dropTable);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  int regionReplication = 3;
  htd.setRegionReplication(regionReplication);
  HTU.deleteTableIfAny(tableName);
  HTU.getHBaseAdmin().createTable(htd);

  TableName toBeDisabledTable = TableName.valueOf(dropTable ? "droppedTable" : "disabledTable");
  HTU.deleteTableIfAny(toBeDisabledTable);
  htd = HTU.createTableDescriptor(toBeDisabledTable.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);

  // Both tables are created; now pause replication.
  ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
  admin.disablePeer(ServerRegionReplicaUtil.getReplicationPeerId());

  // Now that replication is disabled, write to the table to be dropped, then drop the table.
  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableToBeDisabled = connection.getTable(toBeDisabledTable);
  HTU.loadNumericRows(tableToBeDisabled, HBaseTestingUtility.fam1, 6000, 7000);

  AtomicLong skippedEdits = new AtomicLong();
  RegionReplicaReplicationEndpoint.RegionReplicaOutputSink sink =
      mock(RegionReplicaReplicationEndpoint.RegionReplicaOutputSink.class);
  when(sink.getSkippedEditsCounter()).thenReturn(skippedEdits);
  RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter sinkWriter =
      new RegionReplicaReplicationEndpoint.RegionReplicaSinkWriter(sink,
          (ClusterConnection) connection,
          Executors.newSingleThreadExecutor(), Integer.MAX_VALUE);
  RegionLocator rl = connection.getRegionLocator(toBeDisabledTable);
  HRegionLocation hrl = rl.getRegionLocation(HConstants.EMPTY_BYTE_ARRAY);
  byte[] encodedRegionName = hrl.getRegionInfo().getEncodedNameAsBytes();

  Entry entry = new Entry(
      new WALKey(encodedRegionName, toBeDisabledTable, 1),
      new WALEdit());

  HTU.getHBaseAdmin().disableTable(toBeDisabledTable); // disable the table
  if (dropTable) {
    HTU.getHBaseAdmin().deleteTable(toBeDisabledTable);
  }

  sinkWriter.append(toBeDisabledTable, encodedRegionName,
      HConstants.EMPTY_BYTE_ARRAY, Lists.newArrayList(entry, entry));
  assertEquals(2, skippedEdits.get());

  try {
    // Load some data into the remaining (replicated) table.
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
    // Now enable replication again and verify.
    admin.enablePeer(ServerRegionReplicaUtil.getReplicationPeerId());
    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    admin.close();
    table.close();
    rl.close();
    tableToBeDisabled.close();
    HTU.deleteTableIfAny(toBeDisabledTable);
    connection.close();
  }
}
Example 14: setUpBeforeClass
import org.apache.hadoop.hbase.client.Connection; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.startMiniCluster(3);
  REST_TEST_UTIL.startServletContainer(TEST_UTIL.getConfiguration());
  client = new Client(new Cluster().add("localhost",
      REST_TEST_UTIL.getServletPort()));
  context = JAXBContext.newInstance(
      TableModel.class,
      TableInfoModel.class,
      TableListModel.class,
      TableRegionModel.class);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(TABLE)) {
    return;
  }
  HTableDescriptor htd = new HTableDescriptor(TABLE);
  htd.addFamily(new HColumnDescriptor(COLUMN_FAMILY));
  admin.createTable(htd);
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(COLUMN));
  List<Put> puts = new ArrayList<>();
  for (byte b1 = 'a'; b1 < 'z'; b1++) {
    for (byte b2 = 'a'; b2 < 'z'; b2++) {
      for (byte b3 = 'a'; b3 < 'z'; b3++) {
        k[0] = b1;
        k[1] = b2;
        k[2] = b3;
        Put put = new Put(k);
        put.setDurability(Durability.SKIP_WAL);
        put.add(famAndQf[0], famAndQf[1], k);
        puts.add(put);
      }
    }
  }
  Connection connection = TEST_UTIL.getConnection();
  Table table = connection.getTable(TABLE);
  table.put(puts);
  table.close();
  // Get the initial layout (should just be one region).
  RegionLocator regionLocator = connection.getRegionLocator(TABLE);
  List<HRegionLocation> m = regionLocator.getAllRegionLocations();
  assertEquals(1, m.size());
  // Tell the master to split the table.
  admin.split(TABLE);
  // Give some time for the split to happen.
  long timeout = System.currentTimeMillis() + (15 * 1000);
  while (System.currentTimeMillis() < timeout && m.size() != 2) {
    try {
      Thread.sleep(250);
    } catch (InterruptedException e) {
      LOG.warn(StringUtils.stringifyException(e));
    }
    // Check again.
    m = regionLocator.getAllRegionLocations();
  }
  // Should have two regions now.
  assertEquals(2, m.size());
  regionMap = m;
  LOG.info("regions: " + regionMap);
  regionLocator.close();
}