This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.HRegion.get. If you are unsure what HRegion.get does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also read more about the enclosing class, org.apache.hadoop.hbase.regionserver.HRegion.
Four code examples of HRegion.get are shown below, ordered by popularity by default.
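Before the examples, here is a minimal sketch of a direct HRegion.get call. It is written for illustration only: the helper name readOneCell and its parameters are assumptions, not taken from any of the examples below.
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HRegion;

// Hypothetical helper: read a single cell straight from an HRegion.
// Unlike Table.get, this runs inside the region server with no RPC.
byte[] readOneCell(HRegion region, byte[] row, byte[] family, byte[] qualifier)
    throws IOException {
  Get get = new Get(row);
  get.addColumn(family, qualifier);   // narrow the read to one column
  Result result = region.get(get);
  return result.getValue(family, qualifier);
}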
Example 1: verifyNumericRows
import org.apache.hadoop.hbase.regionserver.HRegion; // import of the package/class this method depends on
public void verifyNumericRows(HRegion region, final byte[] f, int startRow, int endRow,
    final boolean present) throws IOException {
  for (int i = startRow; i < endRow; i++) {
    String failMsg = "Failed verification of row :" + i;
    byte[] data = Bytes.toBytes(String.valueOf(i));
    // Read the row straight from the region.
    Result result = region.get(new Get(data));

    boolean hasResult = result != null && !result.isEmpty();
    assertEquals(failMsg + result, present, hasResult);
    if (!present) continue;

    // The row exists: it must hold exactly one cell in family f whose value equals the row key.
    assertTrue(failMsg, result.containsColumn(f, null));
    assertEquals(failMsg, 1, result.getColumnCells(f, null).size());
    Cell cell = result.getColumnLatestCell(f, null);
    assertTrue(failMsg,
        Bytes.equals(data, 0, data.length, cell.getValueArray(), cell.getValueOffset(),
            cell.getValueLength()));
  }
}
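As a usage note: in HBaseTestingUtility-style tests, a verifier like this is normally paired with a loader that writes the same numeric rows beforehand. The loader below is a sketch added for illustration; its name and the null-qualifier convention are assumptions chosen to match the checks above.
// Hypothetical companion loader: writes rows startRow..endRow-1 so that a later
// verifyNumericRows(region, f, startRow, endRow, true) call succeeds.
public void loadNumericRows(HRegion region, final byte[] f, int startRow, int endRow)
    throws IOException {
  for (int i = startRow; i < endRow; i++) {
    byte[] data = Bytes.toBytes(String.valueOf(i));
    Put put = new Put(data);
    put.addColumn(f, null, data);   // null qualifier, row key reused as value (as verified above)
    region.put(put);
  }
}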
Example 2: assertResultEquals
import org.apache.hadoop.hbase.regionserver.HRegion; // import of the package/class this method depends on
protected void assertResultEquals(final HRegion region, final byte [] row,
    final byte [] family, final byte [] qualifier, final long timestamp,
    final byte [] value)
    throws IOException {
  Get get = new Get(row);
  get.setTimeStamp(timestamp);
  Result res = region.get(get);
  NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map =
      res.getMap();
  byte [] res_value = map.get(family).get(qualifier).get(timestamp);

  if (value == null) {
    assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
        " at timestamp " + timestamp, null, res_value);
  } else {
    if (res_value == null) {
      fail(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
          " at timestamp " + timestamp + " was expected to be \"" +
          Bytes.toStringBinary(value) + "\" but was null");
    }
    if (res_value != null) {
      // Compare as strings; comparing the raw byte[] against a String would never match.
      assertEquals(Bytes.toString(family) + " " + Bytes.toString(qualifier) +
          " at timestamp " + timestamp,
          Bytes.toString(value), Bytes.toString(res_value));
    }
  }
}
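For context, a caller would typically write a cell at an explicit timestamp and then assert it back. The snippet below is only an illustration; the row, family, qualifier and value literals are made up.
// Hypothetical usage of the assertion helper above.
long ts = 1234L;
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("fam"), Bytes.toBytes("qual"), ts, Bytes.toBytes("v1"));
region.put(put);
assertResultEquals(region, Bytes.toBytes("row1"),
    Bytes.toBytes("fam"), Bytes.toBytes("qual"), ts, Bytes.toBytes("v1"));
// Passing value == null instead makes the helper assert that no value exists at that timestamp.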
Example 3: verifyMerge
import org.apache.hadoop.hbase.regionserver.HRegion; // import of the package/class this method depends on
private void verifyMerge(final HRegion merged, final int upperbound)
    throws IOException {
  //Test
  Scan scan = new Scan();
  scan.addFamily(FAMILY);
  InternalScanner scanner = merged.getScanner(scan);
  try {
    List<Cell> testRes = null;
    while (true) {
      testRes = new ArrayList<Cell>();
      boolean hasNext = scanner.next(testRes);
      if (!hasNext) {
        break;
      }
    }
  } finally {
    scanner.close();
  }
  //!Test
  for (int i = 0; i < upperbound; i++) {
    for (int j = 0; j < rows[i].length; j++) {
      Get get = new Get(rows[i][j]);
      get.addFamily(FAMILY);
      Result result = merged.get(get);
      assertEquals(1, result.size());
      byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
      assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
      assertTrue(Bytes.equals(bytes, rows[i][j]));
    }
  }
}
Example 4: testFlushSequenceIdIsGreaterThanAllEditsInHFile
import org.apache.hadoop.hbase.regionserver.HRegion; // import of the package/class this method depends on
/**
 * Test that a flush is guaranteed to get a sequence id beyond the last edit appended. We do
 * this by slowing appends in the background ring-buffer thread while calling flush in the
 * foreground. The addition of the sync over HRegion in flush should fix an issue where flush
 * was returning before all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see HBASE-11109
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
  String testName = "testFlushSequenceIdIsGreaterThanAllEditsInHFile";
  final TableName tableName = TableName.valueOf(testName);
  final HRegionInfo hri = new HRegionInfo(tableName);
  final byte[] rowName = tableName.getName();
  final HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("f"));
  HRegion r = HRegion.createHRegion(hri, TEST_UTIL.getDefaultRootDirPath(),
      TEST_UTIL.getConfiguration(), htd);
  HRegion.closeHRegion(r);
  final int countPerFamily = 10;
  final MutableBoolean goslow = new MutableBoolean(false);
  // subclass and doctor a method.
  FSHLog wal = new FSHLog(FileSystem.get(conf), TEST_UTIL.getDefaultRootDirPath(),
      testName, conf) {
    @Override
    void atHeadOfRingBufferEventHandlerAppend() {
      if (goslow.isTrue()) {
        Threads.sleep(100);
        LOG.debug("Sleeping before appending 100ms");
      }
      super.atHeadOfRingBufferEventHandlerAppend();
    }
  };
  HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(),
      TEST_UTIL.getTestFileSystem(), TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
  EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
  try {
    List<Put> puts = null;
    for (HColumnDescriptor hcd: htd.getFamilies()) {
      puts =
          TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
    }
    // Now assert edits made it in.
    final Get g = new Get(rowName);
    Result result = region.get(g);
    assertEquals(countPerFamily * htd.getFamilies().size(), result.size());
    // Construct a WALEdit and add it a few times to the WAL.
    WALEdit edits = new WALEdit();
    for (Put p: puts) {
      CellScanner cs = p.cellScanner();
      while (cs.advance()) {
        edits.add(cs.current());
      }
    }
    // Add any old cluster id.
    List<UUID> clusterIds = new ArrayList<UUID>();
    clusterIds.add(UUID.randomUUID());
    // Now make appends run slow.
    goslow.setValue(true);
    for (int i = 0; i < countPerFamily; i++) {
      final HRegionInfo info = region.getRegionInfo();
      final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC());
      wal.append(htd, info, logkey, edits, true);
    }
    region.flush(true);
    // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
    long currentSequenceId = region.getSequenceId();
    // Now release the appends
    goslow.setValue(false);
    synchronized (goslow) {
      goslow.notifyAll();
    }
    assertTrue(currentSequenceId >= region.getSequenceId());
  } finally {
    region.close(true);
    wal.close();
  }
}