This page collects typical usage examples of org.apache.hadoop.hbase.HBaseTestingUtility.SeenRowTracker (a nested helper class) in Java. If you are wondering what HBaseTestingUtility.SeenRowTracker does, how to use it, or what code that uses it looks like in practice, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseTestingUtility.
Three code examples of HBaseTestingUtility.SeenRowTracker are shown below, sorted by popularity by default.
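Before the examples, a minimal sketch of the SeenRowTracker lifecycle as the examples use it: construct it with a start and stop row, call addRow for every row a scan returns, then call validate to assert that no row in the range was missed. The three-letter row keys below are illustrative placeholders, not taken from the original tests.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.util.Bytes;

// Track rows seen over the half-open range ["aaa", "zzz"); the keys here are hypothetical.
HBaseTestingUtility.SeenRowTracker rowTracker =
    new HBaseTestingUtility.SeenRowTracker(Bytes.toBytes("aaa"), Bytes.toBytes("zzz"));

rowTracker.addRow(Bytes.toBytes("aaa")); // record each row key as the scan returns it
// ... addRow(...) for every other row the scan returns ...

rowTracker.validate(); // fails the test if a row in the range was never added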
Example 1: verifyScanner
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
private void verifyScanner(ResultScanner scanner, byte[] startRow, byte[] stopRow)
    throws IOException, InterruptedException {
  HBaseTestingUtility.SeenRowTracker rowTracker =
      new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);

  while (true) {
    Result result = scanner.next();
    if (result == null) {
      break;
    }
    verifyRow(result);
    rowTracker.addRow(result.getRow());
  }

  // validate all rows are seen
  rowTracker.validate();
}
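For context, a hedged sketch of how this helper might be driven. The table variable and the scan setup below are assumptions for illustration, not part of the original test:

// Assumed: a Table instance named "table" populated with rows in [startRow, stopRow).
Scan scan = new Scan();
scan.setStartRow(startRow); // inclusive
scan.setStopRow(stopRow);   // exclusive
try (ResultScanner scanner = table.getScanner(scan)) {
  verifyScanner(scanner, startRow, stopRow);
}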
Example 2: verifyWithMockedMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expectedNumSplits,
    byte[] startRow, byte[] stopRow) throws IOException, InterruptedException {
  TableSnapshotInputFormat tsif = new TableSnapshotInputFormat();
  InputSplit[] splits = tsif.getSplits(job, 0);

  Assert.assertEquals(expectedNumSplits, splits.length);

  HBaseTestingUtility.SeenRowTracker rowTracker =
      new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);

  for (int i = 0; i < splits.length; i++) {
    // validate input split
    InputSplit split = splits[i];
    Assert.assertTrue(split instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit);

    // validate record reader
    OutputCollector collector = mock(OutputCollector.class);
    Reporter reporter = mock(Reporter.class);
    RecordReader<ImmutableBytesWritable, Result> rr = tsif.getRecordReader(split, job, reporter);

    // validate we can read all the data back
    ImmutableBytesWritable key = rr.createKey();
    Result value = rr.createValue();
    while (rr.next(key, value)) {
      verifyRowFromMap(key, value);
      rowTracker.addRow(key.copyBytes());
    }
    rr.close();
  }

  // validate all rows are seen
  rowTracker.validate();
}
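A hedged sketch of the JobConf setup this mapred-style helper expects, assuming the old-API org.apache.hadoop.hbase.mapred.TableMapReduceUtil.initTableSnapshotMapJob is used. The snapshot name, column family, mapper class, and UTIL (an HBaseTestingUtility instance) are hypothetical placeholders:

// Placeholders: "testSnapshot", family "f1", TestTableSnapshotMapper, UTIL.
JobConf job = new JobConf(UTIL.getConfiguration());
Path restoreDir = UTIL.getDataTestDirOnTestFS("snapshot-restore");
TableMapReduceUtil.initTableSnapshotMapJob("testSnapshot", "f1",
    TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class,
    job, false, restoreDir);
verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, startRow, stopRow);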
Example 3: verifyWithMockedMapReduce
import org.apache.hadoop.hbase.HBaseTestingUtility; // import the package/class the method depends on
private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits,
    byte[] startRow, byte[] stopRow) throws IOException, InterruptedException {
  TableSnapshotInputFormat tsif = new TableSnapshotInputFormat();
  List<InputSplit> splits = tsif.getSplits(job);

  Assert.assertEquals(expectedNumSplits, splits.size());

  HBaseTestingUtility.SeenRowTracker rowTracker =
      new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);

  for (int i = 0; i < splits.size(); i++) {
    // validate input split
    InputSplit split = splits.get(i);
    Assert.assertTrue(split instanceof TableSnapshotRegionSplit);

    // validate record reader
    TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);
    when(taskAttemptContext.getConfiguration()).thenReturn(job.getConfiguration());
    RecordReader<ImmutableBytesWritable, Result> rr =
        tsif.createRecordReader(split, taskAttemptContext);
    rr.initialize(split, taskAttemptContext);

    // validate we can read all the data back
    while (rr.nextKeyValue()) {
      byte[] row = rr.getCurrentKey().get();
      verifyRowFromMap(rr.getCurrentKey(), rr.getCurrentValue());
      rowTracker.addRow(row);
    }
    rr.close();
  }

  // validate all rows are seen
  rowTracker.validate();
}
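For the new-API variant, a hedged sketch of the Job setup that would typically precede this call, using org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initTableSnapshotMapperJob. The snapshot name, mapper class, restore directory, and UTIL are hypothetical placeholders:

// Placeholders: "testSnapshot", TestTableSnapshotMapper, UTIL (an HBaseTestingUtility).
Job job = Job.getInstance(UTIL.getConfiguration());
Path restoreDir = UTIL.getDataTestDirOnTestFS("snapshot-restore");
Scan scan = new Scan(startRow, stopRow); // bounds mirror the startRow/stopRow arguments above
TableMapReduceUtil.initTableSnapshotMapperJob("testSnapshot", scan,
    TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class,
    job, false, restoreDir);
verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, startRow, stopRow);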