This page collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.setStopRow. If you are wondering what Scan.setStopRow does, how to use it, or where to find examples of it, the curated method examples below may help. You can also read further about the containing class, org.apache.hadoop.hbase.client.Scan.
The following presents 15 code examples of Scan.setStopRow, ordered by popularity by default.
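Before the examples, here is a minimal sketch of the typical call pattern. The table name "mytable", the row keys, and the already-open Connection named connection are assumptions for illustration. setStartRow is inclusive and setStopRow is exclusive, so the scan below covers the half-open row interval [row-000, row-100).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

Scan scan = new Scan();
scan.setStartRow(Bytes.toBytes("row-000")); // first row included in the scan
scan.setStopRow(Bytes.toBytes("row-100"));  // scan stops before this row (exclusive)
try (Table table = connection.getTable(TableName.valueOf("mytable")); // hypothetical table
     ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    System.out.println(Bytes.toString(result.getRow()));
  }
}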
Example 1: testMinWithInvalidRange2
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testMinWithInvalidRange2() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[6]); // start row equals stop row: an invalid range
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci =
      new DoubleColumnInterpreter();
  Double min = null;
  try {
    min = aClient.min(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: the invalid range is rejected, so min stays null
  }
  assertEquals(null, min); // control should go to the catch block
}
Example 2: testAvgWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testAvgWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(ROWS[5]);
  scan.setStopRow(ROWS[1]); // stop row sorts before start row: an invalid range
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  Double avg = null;
  try {
    avg = aClient.avg(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: the invalid range is rejected, so avg stays null
  }
  assertEquals(null, avg); // control should go to the catch block
}
Example 3: testMinWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testMinWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  BigDecimal min = null;
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[4]);
  scan.setStopRow(ROWS[2]); // stop row sorts before start row: an invalid range
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  try {
    min = aClient.min(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: the invalid range is rejected, so min stays null
  }
  assertEquals(null, min); // control should go to the catch block
}
Example 4: testAvgWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testAvgWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  scan.setStartRow(ROWS[5]);
  scan.setStopRow(ROWS[1]); // stop row sorts before start row: an invalid range
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci =
      new DoubleColumnInterpreter();
  Double avg = null;
  try {
    avg = aClient.avg(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: the invalid range is rejected, so avg stays null
  }
  assertEquals(null, avg); // control should go to the catch block
}
Example 5: testMaxWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testMaxWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci =
      new DoubleColumnInterpreter();
  Scan scan = new Scan();
  scan.setStartRow(ROWS[4]);
  scan.setStopRow(ROWS[2]); // stop row sorts before start row: an invalid range
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  double max = Double.MIN_VALUE;
  try {
    max = aClient.max(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    max = 0.00; // expected: the invalid range is rejected
  }
  assertEquals(0.00, max, 0.00); // control should go to the catch block
}
Example 6: testMaxWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testMaxWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  Scan scan = new Scan();
  scan.setStartRow(ROWS[4]);
  scan.setStopRow(ROWS[2]); // stop row sorts before start row: an invalid range
  scan.addColumn(TEST_FAMILY, TEST_QUALIFIER);
  BigDecimal max = new BigDecimal(Long.MIN_VALUE);
  try {
    max = aClient.max(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    max = BigDecimal.ZERO; // expected: the invalid range is rejected
  }
  assertEquals(BigDecimal.ZERO, max); // control should go to the catch block
}
Example 7: testStdWithValidRangeWithNullCF
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testStdWithValidRangeWithNullCF() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[17]);
  final ColumnInterpreter<Double, Double, EmptyMsg, DoubleMsg, DoubleMsg> ci =
      new DoubleColumnInterpreter();
  Double std = null;
  try {
    std = aClient.std(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: no column family was set on the scan
  }
  assertEquals(null, std); // the coprocessor throws an IOException about the
                           // null column family, so std stays null
}
Example 8: testStdWithValidRangeWithNullCF
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testStdWithValidRangeWithNullCF() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[17]);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  Double std = null;
  try {
    std = aClient.std(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: no column family was set on the scan
  }
  assertEquals(null, std); // the coprocessor throws an IOException about the
                           // null column family, so std stays null
}
Example 9: innerAddScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

private void innerAddScanner(HRegionLocation regionLocation) throws IOException {
  // Restrict a copy of the raw scan to the boundaries of a single region.
  Scan newScan = new Scan(rawScan);
  if (regionLocation.getRegionInfo().getStartKey() != null)
    newScan.setStartRow(regionLocation.getRegionInfo().getStartKey());
  if (regionLocation.getRegionInfo().getEndKey() != null)
    newScan.setStopRow(regionLocation.getRegionInfo().getEndKey());
  newScan.setAttribute(IndexConstants.SCAN_WITH_INDEX, Bytes.toBytes("Hi"));
  ResultScanner scanner = table.getScanner(newScan);
  synchronized (scannerList) {
    scannerList.add(scanner);
  }
}
Example 10: initScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

/**
 * Initialize the selected range and scanner.
 *
 * @throws IOException
 */
private void initScanner() throws IOException {
  // Pick the range whose index table is covered by the fewest regions.
  ScanRange selectedRange = null;
  int selectedRegionNumber = Integer.MAX_VALUE;
  for (ScanRange range : rangeList.getRanges()) {
    int cover = countCoveringRegions(conn,
        relation.getIndexTableName(range.getFamily(), range.getQualifier()), range.getStart(),
        range.getStop());
    LOG.info("LCDBG, " + cover + " regions are covered by range " + range);
    if (selectedRegionNumber > cover) {
      selectedRegionNumber = cover;
      selectedRange = range;
    }
  }
  LOG.info("LCDBG, GC Scanner using range " + selectedRange + " with " + selectedRegionNumber
      + " regions for scan id= " + rawScan.getId());
  indexFamily = selectedRange.getFamily();
  indexQualifier = selectedRange.getQualifier();
  // Scan the selected range directly; push the remaining ranges down as a filter list.
  List<ScanRange> list = new ArrayList<>(rangeList.getRanges());
  list.remove(selectedRange);
  Scan scan = new Scan();
  scan.setStartRow(selectedRange.getStart());
  scan.setStopRow(selectedRange.getStop());
  scan.setFamilyMap(rawScan.getFamilyMap());
  scan.setCaching(rawScan.getCaching());
  scan.setCacheBlocks(rawScan.getCacheBlocks());
  scan.setId(rawScan.getId());
  scan.setFilter(new ScanRange.ScanRangeList(list).toFilterList());
  Table table = conn.getTable(
      relation.getIndexTableName(selectedRange.getFamily(), selectedRange.getQualifier()));
  scanner = table.getScanner(scan);
}
Example 11: testMinWithValidRangeWithNoCQ
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testMinWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(HConstants.EMPTY_START_ROW); // empty start and stop rows
  scan.setStopRow(HConstants.EMPTY_END_ROW);    // make this a full-table scan
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  long min = aClient.min(TEST_TABLE, ci, scan);
  assertEquals(0, min);
}
Example 12: testMinWithValidRangeWithNoCQ
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testMinWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(HConstants.EMPTY_START_ROW); // empty start and stop rows
  scan.setStopRow(HConstants.EMPTY_END_ROW);    // make this a full-table scan
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  BigDecimal min = aClient.min(TEST_TABLE, ci, scan);
  assertEquals(new BigDecimal("0.00"), min);
}
Example 13: testSumWithValidRange2WithNoCQ
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

@Test(timeout = 300000)
public void testSumWithValidRange2WithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[7]); // range [ROWS[6], ROWS[7]): the stop row is excluded
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  long sum = aClient.sum(TEST_TABLE, ci, scan);
  assertEquals(6 + 60, sum);
}
Example 14: testScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

/**
 * Tests an MR scan using specific start and stop rows.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName =
      "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" +
          (stop != null ? stop.toUpperCase() : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");
  // Build one Scan per table, each bounded by the same optional start/stop rows.
  List<Scan> scans = new ArrayList<Scan>();
  for (String tableName : TABLES) {
    Scan scan = new Scan();
    scan.addFamily(INPUT_FAMILY);
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName));
    if (start != null) {
      scan.setStartRow(Bytes.toBytes(start));
    }
    if (stop != null) {
      scan.setStopRow(Bytes.toBytes(stop));
    }
    scans.add(scan);
    LOG.info("scan before: " + scan);
  }
  runJob(jobName, c, scans);
}
Example 15: testScan
import org.apache.hadoop.hbase.client.Scan; // import the package/class this method depends on

/**
 * Tests an MR scan using specific start and stop rows.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
protected void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName = "Scan" + (start != null ? start.toUpperCase() : "Empty") +
      "To" + (stop != null ? stop.toUpperCase() : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  Scan scan = new Scan();
  scan.addFamily(INPUT_FAMILY);
  if (start != null) {
    scan.setStartRow(Bytes.toBytes(start));
  }
  c.set(KEY_STARTROW, start != null ? start : "");
  if (stop != null) {
    scan.setStopRow(Bytes.toBytes(stop));
  }
  c.set(KEY_LASTROW, last != null ? last : "");
  LOG.info("scan before: " + scan);
  Job job = new Job(c, jobName);
  TableMapReduceUtil.initTableMapperJob(
      Bytes.toString(TABLE_NAME), scan, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  job.setReducerClass(ScanReducer.class);
  job.setNumReduceTasks(1); // one reducer to get the final "first" and "last" keys
  FileOutputFormat.setOutputPath(job,
      new Path(TEST_UTIL.getDataTestDir(), job.getJobName()));
  LOG.info("Started " + job.getJobName());
  assertTrue(job.waitForCompletion(true));
  LOG.info("After map/reduce completion - job " + jobName);
}