本文整理汇总了Java中org.apache.hadoop.hbase.util.Writables.getHRegionInfo方法的典型用法代码示例。如果您正苦于以下问题:Java Writables.getHRegionInfo方法的具体用法?Java Writables.getHRegionInfo怎么用?Java Writables.getHRegionInfo使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.util.Writables
的用法示例。
在下文中一共展示了Writables.getHRegionInfo方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: listAllRegions
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
* Used in tests.
*
* Lists all of the regions currently in META.
* @param conf
* @param offlined True if we are to include offlined regions, false and we'll
* leave out offlined regions from returned list.
* @return List of all user-space regions.
* @throws IOException
*/
/**
 * Used in tests.
 *
 * Lists all of the regions currently in META.
 * @param conf
 * @param offlined True if offlined regions should be included in the result;
 *          false to leave them out of the returned list.
 * @return List of all user-space regions.
 * @throws IOException
 */
public static List<HRegionInfo> listAllRegions(Configuration conf, final boolean offlined)
throws IOException {
  final List<HRegionInfo> accumulated = new ArrayList<HRegionInfo>();
  MetaScannerVisitor visitor = new BlockingMetaScannerVisitor(conf) {
    @Override
    public boolean processRowInternal(Result result) throws IOException {
      // Keep scanning on empty rows.
      if (result == null || result.isEmpty()) {
        return true;
      }
      byte[] regionInfoCell = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER);
      if (regionInfoCell == null) {
        LOG.warn("Null REGIONINFO_QUALIFIER: " + result);
        return true;
      }
      HRegionInfo hri = Writables.getHRegionInfo(regionInfoCell);
      // Collect the region unless it is offline and the caller excluded
      // offlined regions.
      if (offlined || !hri.isOffline()) {
        accumulated.add(hri);
      }
      return true;
    }
  };
  metaScan(conf, visitor);
  return accumulated;
}
示例2: getMetaTableRows
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
* Returns all rows from the .META. table for a given user table
*
* @throws IOException When reading the rows fails.
*/
/**
 * Returns all rows from the .META. table for a given user table
 *
 * @param tableName user table whose .META. rows are wanted
 * @return row keys of every .META. entry belonging to the table
 * @throws IOException When reading the rows fails.
 */
public List<byte[]> getMetaTableRows(byte[] tableName) throws IOException {
  // TODO: Redo using MetaReader.
  HTable meta = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
  List<byte[]> matchingRows = new ArrayList<byte[]>();
  ResultScanner scanner = meta.getScanner(new Scan());
  for (Result r : scanner) {
    byte[] regionInfoCell = r.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    if (regionInfoCell == null) {
      LOG.error("No region info for row " + Bytes.toString(r.getRow()));
      // TODO figure out what to do for this new hosed case.
      continue;
    }
    HRegionInfo info = Writables.getHRegionInfo(regionInfoCell);
    // Keep only rows whose region belongs to the requested table.
    if (Bytes.compareTo(info.getTableName(), tableName) == 0) {
      LOG.info("getMetaTableRows: row -> " +
          Bytes.toStringBinary(r.getRow()) + info);
      matchingRows.add(r.getRow());
    }
  }
  scanner.close();
  meta.close();
  return matchingRows;
}
示例3: listAllRegions
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
* Lists all of the regions currently in META.
* @param conf
* @param offlined True if we are to include offlined regions, false and we'll
* leave out offlined regions from returned list.
* @return List of all user-space regions.
* @throws IOException
*/
/**
 * Lists all of the regions currently in META.
 * @param conf
 * @param offlined True if offlined regions should be included in the result;
 *          false to leave them out of the returned list.
 * @return List of all user-space regions.
 * @throws IOException
 */
public static List<HRegionInfo> listAllRegions(Configuration conf, final boolean offlined)
throws IOException {
  final List<HRegionInfo> collected = new ArrayList<HRegionInfo>();
  MetaScannerVisitor visitor = new MetaScannerVisitor() {
    @Override
    public boolean processRow(Result result) throws IOException {
      // Skip empty rows but keep scanning.
      if (result == null || result.isEmpty()) {
        return true;
      }
      byte[] cell = result.getValue(HConstants.CATALOG_FAMILY,
          HConstants.REGIONINFO_QUALIFIER);
      if (cell == null) {
        LOG.warn("Null REGIONINFO_QUALIFIER: " + result);
        return true;
      }
      HRegionInfo hri = Writables.getHRegionInfo(cell);
      // Take the region unless it is offline and the caller asked to
      // exclude offlined regions.
      if (offlined || !hri.isOffline()) {
        collected.add(hri);
      }
      return true;
    }
  };
  metaScan(conf, visitor);
  return collected;
}
示例4: checkSplit
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Rewrites the split-daughter cell named by {@code which} if it has not
 * yet been migrated: the serialized region info is deserialized (which
 * converts a 090-format HRI to the 092 format) and re-added to the Put.
 *
 * @param r catalog row holding the candidate split cell
 * @param p Put that receives the rewritten cell
 * @param which qualifier of the split cell to check
 * @throws IOException on (de)serialization failure
 */
static void checkSplit(final Result r, final Put p, final byte [] which)
throws IOException {
  byte[] serialized = getBytes(r, which);
  if (isMigrated(serialized)) {
    return; // already in the new format; nothing to rewrite
  }
  // This will convert the HRI from 090 to 092 HRI.
  HRegionInfo hri = Writables.getHRegionInfo(serialized);
  p.add(HConstants.CATALOG_FAMILY, which, Writables.getBytes(hri));
}
示例5: getHRegionInfo
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Deserializes the HRegionInfo stored in the catalog-family info cell of
 * the given scan result.
 *
 * @param data row returned by a catalog scan
 * @return the deserialized HRegionInfo, or null when the info cell is absent
 * @throws IOException on deserialization failure
 */
public static HRegionInfo getHRegionInfo(
    Result data) throws IOException {
  byte[] cell = data.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  if (cell == null) {
    return null;
  }
  HRegionInfo info = Writables.getHRegionInfo(cell);
  LOG.info("Current INFO from scan results = " + info);
  return info;
}
示例6: allTableRegions
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
* Lists all of the table regions currently in META.
* @param conf
* @param connection connection to be used internally (null to create a new connection)
* @param offlined True if we are to include offlined regions, false and we'll
* leave out offlined regions from returned list.
* @return Map of all user-space regions to servers
* @throws IOException
*/
/**
 * Lists all of the table regions currently in META.
 * @param conf
 * @param connection connection to be used internally (null to create a new connection)
 * @param offlined True if we are to include offlined regions, false and we'll
 * leave out offlined regions from returned list.
 * @return Map of all user-space regions to servers
 * @throws IOException
 */
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(Configuration conf,
    HConnection connection, final byte[] tablename, final boolean offlined) throws IOException {
  final NavigableMap<HRegionInfo, ServerName> regions =
      new TreeMap<HRegionInfo, ServerName>();
  MetaScannerVisitor visitor = new TableMetaScannerVisitor(conf, tablename) {
    @Override
    public boolean processRowInternal(Result rowResult) throws IOException {
      HRegionInfo info = Writables.getHRegionInfo(
          rowResult.getValue(HConstants.CATALOG_FAMILY,
              HConstants.REGIONINFO_QUALIFIER));
      // Server location: host:port string plus its startcode.
      byte[] serverCell = rowResult.getValue(HConstants.CATALOG_FAMILY,
          HConstants.SERVER_QUALIFIER);
      String hostAndPort = null;
      if (serverCell != null && serverCell.length > 0) {
        hostAndPort = Bytes.toString(serverCell);
      }
      byte[] startcodeCell = rowResult.getValue(HConstants.CATALOG_FAMILY,
          HConstants.STARTCODE_QUALIFIER);
      long startcode = -1L;
      if (startcodeCell != null && startcodeCell.length > 0) {
        startcode = Bytes.toLong(startcodeCell);
      }
      // Only live (non-offline, non-split) regions are recorded.
      if (info.isOffline() || info.isSplit()) {
        return true;
      }
      ServerName sn = null;
      if (hostAndPort != null && hostAndPort.length() > 0) {
        sn = new ServerName(hostAndPort, startcode);
      }
      regions.put(new UnmodifyableHRegionInfo(info), sn);
      return true;
    }
  };
  metaScan(conf, connection, visitor, tablename);
  return regions;
}
示例7: getHRegionInfo
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
* Get HRegionInfo from passed Map of row values.
* @param result Map to do lookup in.
* @return Null if not found (and logs fact that expected COL_REGIONINFO
* was missing) else deserialized {@link HRegionInfo}
* @throws IOException
*/
/**
 * Get HRegionInfo from passed Map of row values.
 * @param result Map to do lookup in.
 * @return Null if not found (and logs fact that expected COL_REGIONINFO
 * was missing) else deserialized {@link HRegionInfo}
 * @throws IOException
 */
static HRegionInfo getHRegionInfo(final Result result)
throws IOException {
  byte[] cell =
      result.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  if (cell != null) {
    return Writables.getHRegionInfo(cell);
  }
  LOG.warn("REGIONINFO_QUALIFIER is empty in " + result);
  return null;
}
示例8: testMigrateHRegionInfoFromVersion0toVersion1
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Round-trips a 0.90-style {@code HRegionInfo090x} through Writables and
 * verifies deserialization produces a current-version HRegionInfo that
 * keeps the same table name.
 */
@Test
public void testMigrateHRegionInfoFromVersion0toVersion1()
    throws IOException {
  HTableDescriptor htd =
      getHTableDescriptor("testMigrateHRegionInfoFromVersion0toVersion1");
  HRegionInfo090x legacy =
      new HRegionInfo090x(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  byte[] serialized = Writables.getBytes(legacy);
  // Now deserialize into an HRegionInfo
  HRegionInfo migrated = Writables.getHRegionInfo(serialized);
  Assert.assertEquals(migrated.getTableNameAsString(),
      legacy.getTableDesc().getNameAsString());
  Assert.assertEquals(HRegionInfo.VERSION, migrated.getVersion());
}
示例9: addToEachStartKey
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Scans .META. and, for each region found, puts one test-family cell keyed
 * off that region's start key into the test table.
 *
 * @param expected number of rows the scan is expected to write
 * @return the number of rows written (asserted equal to {@code expected})
 * @throws IOException on scan or put failure
 */
private static int addToEachStartKey(final int expected) throws IOException {
  HTable userTable = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(),
      HConstants.META_TABLE_NAME);
  int rowsWritten = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner scanner = meta.getScanner(scan);
  Result r;
  while ((r = scanner.next()) != null) {
    byte[] regionInfoCell =
        r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    // Stop at the first row with no usable region info.
    if (regionInfoCell == null || regionInfoCell.length <= 0) {
      break;
    }
    HRegionInfo hri = Writables.getHRegionInfo(regionInfoCell);
    // If start key, add 'aaa'.
    byte[] row = getStartKey(hri);
    Put put = new Put(row);
    put.setWriteToWAL(false);
    put.add(getTestFamily(), getTestQualifier(), row);
    userTable.put(put);
    rowsWritten++;
  }
  scanner.close();
  Assert.assertEquals(expected, rowsWritten);
  userTable.close();
  meta.close();
  return rowsWritten;
}
示例10: addToEachStartKey
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Walks .META. and writes one test-family cell, keyed by each region's
 * start key, into the test table.
 *
 * @param expected number of rows expected to be written
 * @return the count of rows written (asserted equal to {@code expected})
 * @throws IOException on scan or put failure
 */
private static int addToEachStartKey(final int expected) throws IOException {
  HTable target = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  HTable meta = new HTable(TEST_UTIL.getConfiguration(),
      HConstants.META_TABLE_NAME);
  int count = 0;
  Scan scan = new Scan();
  scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
  ResultScanner scanner = meta.getScanner(scan);
  Result r;
  while ((r = scanner.next()) != null) {
    byte[] cell =
        r.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
    // First row lacking region info ends the walk.
    if (cell == null || cell.length <= 0) {
      break;
    }
    HRegionInfo hri = Writables.getHRegionInfo(cell);
    // If start key, add 'aaa'.
    byte[] row = getStartKey(hri);
    Put put = new Put(row);
    put.setWriteToWAL(false);
    put.add(getTestFamily(), getTestQualifier(), row);
    target.put(put);
    count++;
  }
  scanner.close();
  Assert.assertEquals(expected, count);
  target.close();
  meta.close();
  return count;
}
示例11: regionServerMap
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Builds a map of region -> hosting ServerName for the given set of tables
 * by scanning the catalog table. With exactly one table name the call is
 * delegated to the single-table overload; with more than one, a single meta
 * scan filters rows to the requested tables. Offline or split regions are
 * never included.
 *
 * @param args command-line args, used only for verbose timing output
 * @param conf cluster configuration used for the meta scan
 * @param connection connection passed through to the single-table overload
 * @param tableNames tables whose regions are wanted
 * @param offlined unused: offline regions are always excluded (hence the
 *          UnusedParameters suppression)
 * @return map from region info to hosting server (value may be null when
 *         no server is recorded)
 * @throws IOException if the meta scan fails
 */
@SuppressWarnings("UnusedParameters")
public static NavigableMap<HRegionInfo, ServerName> regionServerMap(Args args, Configuration conf, HConnection connection, final Set<String> tableNames, final boolean offlined) throws IOException {
    long timestamp = System.currentTimeMillis();
    final NavigableMap<HRegionInfo, ServerName> regions = new TreeMap<>();
    if (tableNames.size() == 1) {
        return regionServerMap(args, conf, connection, tableNames.toArray(new String[1])[0], offlined);
    } else if (tableNames.size() > 1) {
        MetaScanner.BlockingMetaScannerVisitor visitor = new MetaScanner.BlockingMetaScannerVisitor(conf) {
            @Override
            public boolean processRowInternal(Result rowResult) throws IOException {
                HRegionInfo info = Writables.getHRegionInfo(rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER));
                byte[] value = rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER);
                String hostAndPort = null;
                if (value != null && value.length > 0) {
                    hostAndPort = Bytes.toString(value);
                }
                value = rowResult.getValue(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER);
                long startcode = -1L;
                if (value != null && value.length > 0) startcode = Bytes.toLong(value);
                if (!(info.isOffline() || info.isSplit())) {
                    // NOTE(review): a former "if (info.isOffline() && !offlined) return true;"
                    // check here was unreachable (isOffline() is known false inside this
                    // branch) and has been removed; behavior is unchanged.
                    ServerName sn = null;
                    if (hostAndPort != null && hostAndPort.length() > 0) {
                        sn = new ServerName(hostAndPort, startcode);
                    }
                    String tableName = info.getTableNameAsString();
                    if (tableNames.contains(tableName))
                        regions.put(info, sn);
                }
                return true;
            }
        };
        MetaScanner.metaScan(conf, visitor);
    }
    Util.printVerboseMessage(args, "CommandAdapter.regionServerMap", timestamp);
    return regions;
}
示例12: allTableRegions
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
* Lists all of the table regions currently in META.
* @param conf
* @param offlined True if we are to include offlined regions, false and we'll
* leave out offlined regions from returned list.
* @return Map of all user-space regions to servers
* @throws IOException
*/
/**
 * Lists all of the table regions currently in META.
 * @param conf
 * @param offlined True if we are to include offlined regions, false and we'll
 * leave out offlined regions from returned list.
 * @return Map of all user-space regions to servers
 * @throws IOException
 */
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(Configuration conf, final byte [] tablename, final boolean offlined)
throws IOException {
  final NavigableMap<HRegionInfo, ServerName> regions =
      new TreeMap<HRegionInfo, ServerName>();
  MetaScannerVisitor visitor = new MetaScannerVisitor() {
    @Override
    public boolean processRow(Result rowResult) throws IOException {
      HRegionInfo info = Writables.getHRegionInfo(
          rowResult.getValue(HConstants.CATALOG_FAMILY,
              HConstants.REGIONINFO_QUALIFIER));
      // Meta rows are sorted by table, so the first row belonging to a
      // different table ends the scan (return false stops the scanner).
      if (!(Bytes.equals(info.getTableName(), tablename))) {
        return false;
      }
      byte[] serverCell = rowResult.getValue(HConstants.CATALOG_FAMILY,
          HConstants.SERVER_QUALIFIER);
      String hostAndPort = null;
      if (serverCell != null && serverCell.length > 0) {
        hostAndPort = Bytes.toString(serverCell);
      }
      byte[] startcodeCell = rowResult.getValue(HConstants.CATALOG_FAMILY,
          HConstants.STARTCODE_QUALIFIER);
      long startcode = -1L;
      if (startcodeCell != null && startcodeCell.length > 0) {
        startcode = Bytes.toLong(startcodeCell);
      }
      // Record live regions only; offline or mid-split regions are skipped.
      if (info.isOffline() || info.isSplit()) {
        return true;
      }
      ServerName sn = null;
      if (hostAndPort != null && hostAndPort.length() > 0) {
        sn = new ServerName(hostAndPort, startcode);
      }
      regions.put(new UnmodifyableHRegionInfo(info), sn);
      return true;
    }
  };
  metaScan(conf, visitor, tablename);
  return regions;
}
示例13: getStartEndKeys
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
* Gets the starting and ending row keys for every region in the currently
* open table.
* <p>
* This is mainly useful for the MapReduce integration.
* @return Pair of arrays of region starting and ending row keys
* @throws IOException if a remote or network exception occurs
*/
@SuppressWarnings("unchecked")
public Pair<byte[][],byte[][]> getStartEndKeys() throws IOException {
final List<byte[]> startKeyList = new ArrayList<byte[]>();
final List<byte[]> endKeyList = new ArrayList<byte[]>();
MetaScannerVisitor visitor = new MetaScannerVisitor() {
public boolean processRow(Result rowResult) throws IOException {
byte [] bytes = rowResult.getValue(HConstants.CATALOG_FAMILY,
HConstants.REGIONINFO_QUALIFIER);
if (bytes == null) {
LOG.warn("Null " + HConstants.REGIONINFO_QUALIFIER + " cell in " +
rowResult);
return true;
}
HRegionInfo info = Writables.getHRegionInfo(bytes);
if (Bytes.equals(info.getTableName(), getTableName())) {
if (!(info.isOffline() || info.isSplit())) {
startKeyList.add(info.getStartKey());
endKeyList.add(info.getEndKey());
}
}
return true;
}
};
MetaScanner.metaScan(configuration, visitor, this.tableName);
return new Pair<byte [][], byte [][]>(
startKeyList.toArray(new byte[startKeyList.size()][]),
endKeyList.toArray(new byte[endKeyList.size()][]));
}
示例14: getRegionInfo
import org.apache.hadoop.hbase.util.Writables; //导入方法依赖的package包/类
/**
 * Thrift endpoint: looks up the catalog row at or before {@code searchRow}
 * and returns the region (keys, id, name, version, and — when recorded —
 * the hosting server) that contains it.
 *
 * @param searchRow row to locate; the region covering it is returned
 * @return populated TRegionInfo for the containing region
 * @throws IOError wrapping any underlying IOException
 */
@Override
public TRegionInfo getRegionInfo(ByteBuffer searchRow) throws IOError {
  try {
    HTable table = getTable(HConstants.META_TABLE_NAME);
    byte[] row = toBytes(searchRow);
    Result startRowResult = table.getRowOrBefore(
        row, HConstants.CATALOG_FAMILY);
    if (startRowResult == null) {
      // Use the already-extracted 'row' bytes here. The previous
      // searchRow.array() ignored the buffer's position/arrayOffset and
      // throws UnsupportedOperationException on direct/read-only buffers.
      throw new IOException("Cannot find row in .META., row="
          + Bytes.toString(row));
    }
    // find region start and end keys
    byte[] value = startRowResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    if (value == null || value.length == 0) {
      throw new IOException("HRegionInfo REGIONINFO was null or " +
          " empty in Meta for row="
          + Bytes.toString(row));
    }
    HRegionInfo regionInfo = Writables.getHRegionInfo(value);
    TRegionInfo region = new TRegionInfo();
    region.setStartKey(regionInfo.getStartKey());
    region.setEndKey(regionInfo.getEndKey());
    region.id = regionInfo.getRegionId();
    region.setName(regionInfo.getRegionName());
    region.version = regionInfo.getVersion();
    // find region assignment to server
    value = startRowResult.getValue(HConstants.CATALOG_FAMILY,
        HConstants.SERVER_QUALIFIER);
    if (value != null && value.length > 0) {
      String hostAndPort = Bytes.toString(value);
      region.setServerName(Bytes.toBytes(
          Addressing.parseHostname(hostAndPort)));
      region.port = Addressing.parsePort(hostAndPort);
    }
    return region;
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(e.getMessage());
  }
}