This article collects typical usage examples of the Java class org.apache.hadoop.hbase.client.Get. If you are wondering what the Get class does, how to use it, or where to find examples of it, the hand-picked examples below should help.
The Get class belongs to the org.apache.hadoop.hbase.client package. 15 code examples are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
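Before the individual examples, a minimal end-to-end sketch of the basic Get workflow may help orient readers. This is not taken from any example below; the table name "demo", family "cf", and qualifier "q" are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class GetExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Table "demo" with family "cf" and qualifier "q" are illustrative names.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("demo"))) {
      Get get = new Get(Bytes.toBytes("row-1"));              // row key to read
      get.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")); // restrict to one cell
      Result result = table.get(get);
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
      System.out.println(value == null ? "miss" : Bytes.toString(value));
    }
  }
}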
Example 1: deleteAndWait
import org.apache.hadoop.hbase.client.Get; // import the required package/class
private void deleteAndWait(byte[] row, Table source, Table... targets)
    throws Exception {
  Delete del = new Delete(row);
  source.delete(del);

  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for del replication");
    }
    boolean removedFromAll = true;
    for (Table target : targets) {
      Result res = target.get(get);
      if (res.size() >= 1) {
        LOG.info("Row not deleted");
        removedFromAll = false;
        break;
      }
    }
    if (removedFromAll) {
      break;
    } else {
      Thread.sleep(SLEEP_TIME);
    }
  }
}
Example 2: buildGetRowOrBeforeRequest
import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Create a new protocol buffer GetRequest to get a row, all columns in a family.
 * If there is no such row, return the closest row before it.
 *
 * @param regionName the name of the region to get
 * @param row the row to get
 * @param family the column family to get; if the row is missing, the request
 *   should return the immediate row before it
 * @return a protocol buffer GetRequest
 */
public static GetRequest buildGetRowOrBeforeRequest(
    final byte[] regionName, final byte[] row, final byte[] family) {
  GetRequest.Builder builder = GetRequest.newBuilder();
  RegionSpecifier region = buildRegionSpecifier(
      RegionSpecifierType.REGION_NAME, regionName);
  builder.setRegion(region);

  Column.Builder columnBuilder = Column.newBuilder();
  columnBuilder.setFamily(ByteStringer.wrap(family));
  ClientProtos.Get.Builder getBuilder =
      ClientProtos.Get.newBuilder();
  getBuilder.setRow(ByteStringer.wrap(row));
  getBuilder.addColumn(columnBuilder.build());
  getBuilder.setClosestRowBefore(true);
  builder.setGet(getBuilder.build());
  return builder.build();
}
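The helper above only builds the request. A hypothetical call site might look like the following; all byte[] values are illustrative, and the getClosestRowBefore() accessor is assumed from the standard protobuf getter generated for the field set by setClosestRowBefore(true):

// Hypothetical invocation; the region name, row, and family values are illustrative.
byte[] regionName = Bytes.toBytes("demo,,1490000000000.0123456789abcdef0123456789abcdef.");
byte[] row = Bytes.toBytes("row-42");
byte[] family = Bytes.toBytes("cf");
GetRequest request = buildGetRowOrBeforeRequest(regionName, row, family);
// The flag set by setClosestRowBefore(true) travels with the request.
assert request.getGet().getClosestRowBefore();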
Example 3: get
import org.apache.hadoop.hbase.client.Get; // import the required package/class
@Override
public <T> List<T> get(TableName tableName, final List<Get> getList, final RowMapper<T> mapper) {
  assertAccessAvailable();
  return execute(tableName, new TableCallback<List<T>>() {
    @Override
    public List<T> doInTable(Table table) throws Throwable {
      Result[] result = table.get(getList);
      List<T> list = new ArrayList<>(result.length);
      for (int i = 0; i < result.length; i++) {
        T t = mapper.mapRow(result[i], i);
        list.add(t);
      }
      return list;
    }
  });
}
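The template above delegates row conversion to the caller. A minimal sketch of a RowMapper implementation follows; the mapRow(Result, int) signature is assumed from the call mapper.mapRow(result[i], i) above, and the family/qualifier names are illustrative:

// Minimal sketch: a RowMapper that extracts one column as a String.
// Assumes RowMapper<T> declares T mapRow(Result result, int rowNum).
RowMapper<String> valueMapper = new RowMapper<String>() {
  @Override
  public String mapRow(Result result, int rowNum) {
    byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    return value == null ? null : Bytes.toString(value);
  }
};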
Example 4: verifyNumericRows
import org.apache.hadoop.hbase.client.Get; // import the required package/class
public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row :" + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Result result = region.get(new Get(data));
    boolean hasResult = result != null && !result.isEmpty();
    assertEquals(failMsg + result, present, hasResult);
    if (!present) continue;

    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
            cell.getValueLength()));
  }
}
Example 5: createGet
import org.apache.hadoop.hbase.client.Get; // import the required package/class
protected Get createGet(long keyToRead) throws IOException {
  Get get = new Get(dataGenerator.getDeterministicUniqueKey(keyToRead));
  String cfsString = "";
  byte[][] columnFamilies = dataGenerator.getColumnFamilies();
  for (byte[] cf : columnFamilies) {
    get.addFamily(cf);
    if (verbose) {
      if (cfsString.length() > 0) {
        cfsString += ", ";
      }
      cfsString += "[" + Bytes.toStringBinary(cf) + "]";
    }
  }
  get = dataGenerator.beforeGet(keyToRead, get);
  if (regionReplicaId > 0) {
    get.setReplicaId(regionReplicaId);
    get.setConsistency(Consistency.TIMELINE);
  }
  if (verbose) {
    LOG.info("[" + readerId + "] " + "Querying key " + keyToRead + ", cfs " + cfsString);
  }
  return get;
}
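The replica-read branch above is the interesting part. A minimal standalone sketch of a timeline-consistent read follows; it assumes region replication is enabled on the table and that table is an already-open Table, with the row key being illustrative:

// Minimal sketch: read with timeline consistency, possibly from a secondary replica.
Get get = new Get(Bytes.toBytes("row-1"));
get.setConsistency(Consistency.TIMELINE); // allow possibly-stale replica reads
Result result = table.get(get);
if (result.isStale()) {
  // The answer came from a secondary replica and may lag the primary.
}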
Example 6: next
import org.apache.hadoop.hbase.client.Get; // import the required package/class
@Override
public Result next() throws IOException {
  if (rawTable == null) return null;
  if (localCache.isEmpty()) {
    // refill the local cache with a batch get
    int size = Math.min(rowkeyQueue.size(), LOCAL_CACHE_SIZE);
    List<Get> gets = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      gets.add(new Get(rowkeyQueue.poll()));
    }
    Result[] results = rawTable.get(gets);
    for (Result res : results) {
      localCache.add(res);
    }
  }
  if (localCache.isEmpty()) {
    // still empty: no more results, so close the table and null out rawTable
    rawTable.close();
    rawTable = null;
    return null;
  }
  return localCache.poll();
}
Example 7: getRegionLocation
import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Returns the HRegionLocation from meta for the given region
 * @param connection connection we're using
 * @param regionName region we're looking for
 * @return HRegionLocation for the given region
 * @throws IOException
 */
public static HRegionLocation getRegionLocation(Connection connection,
    byte[] regionName) throws IOException {
  byte[] row = regionName;
  HRegionInfo parsedInfo = null;
  try {
    parsedInfo = parseRegionInfoFromRegionName(regionName);
    row = getMetaKeyForRegion(parsedInfo);
  } catch (Exception parseEx) {
    // Ignore; this path is used when a table name is passed as regionName.
  }
  Get get = new Get(row);
  get.addFamily(HConstants.CATALOG_FAMILY);
  Result r = get(getMetaHTable(connection), get);
  RegionLocations locations = getRegionLocations(r);
  return locations == null
      ? null
      : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId());
}
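A hypothetical call site for the helper above, assuming an open Connection; MetaAccessor stands in for whatever utility class hosts the method (as the getMetaHTable call suggests), and the encoded region name is illustrative:

// Hypothetical usage; class and byte[] values are illustrative.
byte[] regionName = Bytes.toBytes("demo,,1490000000000.0123456789abcdef0123456789abcdef.");
HRegionLocation location = MetaAccessor.getRegionLocation(connection, regionName);
if (location != null) {
  System.out.println("Region hosted on " + location.getServerName());
}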
Example 8: testReplayingFlushRequestRestoresReadsEnabledState
import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Test the case where the secondary region replica is not in a reads-enabled state because it is
 * waiting for a flush or region open marker from the primary region. Replaying a CANNOT_FLUSH
 * flush marker entry should restore the reads-enabled status in the region and allow the reads
 * to continue.
 */
@Test
public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException {
  disableReads(secondaryRegion);

  // Test case 1: replaying a CANNOT_FLUSH request marker, assuming it came from
  // a triggered flush, restores readsEnabled
  primaryRegion.flushcache(true, true);
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum());
    }
  }

  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
Example 9: wait
import org.apache.hadoop.hbase.client.Get; // import the required package/class
private void wait(byte[] row, Table target, boolean isDeleted)
    throws Exception {
  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time for replication. Row:" + Bytes.toString(row)
          + ". IsDeleteReplication:" + isDeleted);
    }
    Result res = target.get(get);
    boolean sleep = isDeleted ? res.size() > 0 : res.size() == 0;
    if (sleep) {
      LOG.info("Waiting for more time for replication. Row:"
          + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
      Thread.sleep(SLEEP_TIME);
    } else {
      if (!isDeleted) {
        assertArrayEquals(row, res.value());
      }
      LOG.info("Obtained row:"
          + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);
      break;
    }
  }
}
Example 10: checkWithWait
import org.apache.hadoop.hbase.client.Get; // import the required package/class
private void checkWithWait(byte[] row, int count, Table table) throws Exception {
  Get get = new Get(row);
  for (int i = 0; i < NB_RETRIES; i++) {
    if (i == NB_RETRIES - 1) {
      fail("Waited too much time while getting the row.");
    }
    Result res = table.get(get);
    if (res.size() >= 1) {
      LOG.info("Row is replicated");
      assertEquals("Table '" + table + "' did not have the expected number of results.",
          count, res.size());
      break;
    }
    Thread.sleep(SLEEP_TIME);
  }
}
Example 11: testNamespaceUserGrant
import org.apache.hadoop.hbase.client.Get; // import the required package/class
@Test (timeout=180000)
public void testNamespaceUserGrant() throws Exception {
  AccessTestAction getAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table t = conn.getTable(TEST_TABLE)) {
        return t.get(new Get(TEST_ROW));
      }
    }
  };

  String namespace = TEST_TABLE.getNamespaceAsString();

  // Grant namespace READ to USER_NONE; this should supersede any table permissions
  grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ);

  // Now USER_NONE should be able to read
  verifyAllowed(getAction, USER_NONE);

  // Revoke namespace READ from USER_NONE
  revokeFromNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ);
  verifyDenied(getAction, USER_NONE);
}
Example 12: blockUntilRegionIsOpened
import org.apache.hadoop.hbase.client.Get; // import the required package/class
public static void blockUntilRegionIsOpened(Configuration conf, long timeout, HRegionInfo hri)
    throws IOException, InterruptedException {
  log("blocking until region is opened for reading:" + hri.getRegionNameAsString());
  long start = System.currentTimeMillis();
  try (Connection conn = ConnectionFactory.createConnection(conf);
       Table table = conn.getTable(hri.getTable())) {
    byte[] row = hri.getStartKey();
    // Check for a null/empty row. If we find one, use a key that is likely to be in the first region.
    if (row == null || row.length <= 0) row = new byte[] { '0' };
    Get get = new Get(row);
    while (System.currentTimeMillis() - start < timeout) {
      try {
        table.get(get);
        break;
      } catch (IOException ex) {
        // not open yet; wait some more
      }
      Threads.sleep(10);
    }
  }
}
Example 13: doGet
import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Gets a Table for this table, performs the Get, and closes the Table.
 */
public Result doGet(final Get get) {
  Result result;
  final Table tableInterface = getTable();
  try {
    result = doGet(tableInterface, get);
  } finally {
    closeTable(tableInterface);
  }
  return result;
}
Example 14: testMergeTool
import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Test merge tool.
 * @throws Exception
 */
public void testMergeTool() throws Exception {
  // First verify we can read the rows from the source regions and that they
  // contain the right data.
  for (int i = 0; i < regions.length; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = regions[i].get(get);
      byte[] bytes = CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
    // Close the region and delete the log
    HRegion.closeHRegion(regions[i]);
  }

  WAL log = wals.getWAL(new byte[]{});
  // Merge Region 0 and Region 1
  HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
      this.sourceRegions[0].getRegionNameAsString(),
      this.sourceRegions[1].getRegionNameAsString(), log, 2);
  // Merge the result of merging regions 0 and 1 with region 2
  merged = mergeAndVerify("merging regions 0+1 and 2",
      merged.getRegionInfo().getRegionNameAsString(),
      this.sourceRegions[2].getRegionNameAsString(), log, 3);
  // Merge the result of merging regions 0, 1 and 2 with region 3
  merged = mergeAndVerify("merging regions 0+1+2 and 3",
      merged.getRegionInfo().getRegionNameAsString(),
      this.sourceRegions[3].getRegionNameAsString(), log, 4);
  // Merge the result of merging regions 0, 1, 2 and 3 with region 4
  merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
      merged.getRegionInfo().getRegionNameAsString(),
      this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
}
Example 15: testScanAcrossManySmallColumns
import org.apache.hadoop.hbase.client.Get; // import the required package/class
/**
 * Use case:
 *
 * - create a row with 1M cells, 10 bytes in each
 * - flush and run a major compaction
 * - try to Get the whole row
 *
 * An OOME used to happen in StoreScanner.next(..).
 *
 * @throws IOException
 */
@Test(expected = RowTooBigException.class)
public void testScanAcrossManySmallColumns() throws IOException {
  byte[] row1 = Bytes.toBytes("row1");
  byte[] fam1 = Bytes.toBytes("fam1");

  HTableDescriptor htd = TEST_HTD;
  HColumnDescriptor hcd = new HColumnDescriptor(fam1);
  if (htd.hasFamily(hcd.getName())) {
    htd.modifyFamily(hcd);
  } else {
    htd.addFamily(hcd);
  }
  final HRegionInfo hri =
      new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW,
          HConstants.EMPTY_END_ROW);
  Region region = HTU.createHRegion(hri, rootRegionDir, HTU.getConfiguration(), htd);
  try {
    // Add to memstore: 10 puts x 100,000 columns = 1M cells in a single row
    for (int i = 0; i < 10; i++) {
      Put put = new Put(row1);
      for (int j = 0; j < 10 * 10000; j++) {
        put.add(fam1, Bytes.toBytes("col_" + i + "_" + j), new byte[10]);
      }
      region.put(put);
      region.flush(true);
    }
    region.compact(true);

    Get get = new Get(row1);
    region.get(get); // expected to throw RowTooBigException
  } finally {
    HBaseTestingUtility.closeRegion(region);
  }
}
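The RowTooBigException above comes from HBase's per-row size guard. A minimal sketch of raising that limit in a test configuration follows, assuming the standard hbase.table.max.rowsize key (default 1 GB) is what the running HBase version consults:

// Minimal sketch: raise the per-row size guard.
// Assumption: the server honors hbase.table.max.rowsize (default 1 GB).
Configuration conf = HBaseConfiguration.create();
conf.setLong("hbase.table.max.rowsize", 2L * 1024 * 1024 * 1024); // 2 GB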