This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Scan.addFamily. If you have been wondering what exactly Scan.addFamily does and how to use it, the curated code samples below should help. You can also read further about the enclosing class, org.apache.hadoop.hbase.client.Scan.
Below are 15 code examples of Scan.addFamily, sorted by popularity by default.
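Before the examples, here is a minimal, self-contained sketch of the basic call pattern. It is an illustration only: the table name "exampleTable" and the family name "cf" are hypothetical, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanAddFamilyExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical table and family names; adjust to your cluster.
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
      Scan scan = new Scan();
      // Request every column of family "cf"; addFamily overrides any earlier
      // addColumn calls made for the same family.
      scan.addFamily(Bytes.toBytes("cf"));
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toString(result.getRow()));
        }
      }
    }
  }
}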
Example 1: configure
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Override
public void configure(JobConf job) {
  try {
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    TableName tableName = TableName.valueOf("exampleJobConfigurableTable");
    // mandatory
    initializeTable(connection, tableName);
    byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
        Bytes.toBytes("columnB") };
    // optional
    Scan scan = new Scan();
    for (byte[] family : inputColumns) {
      scan.addFamily(family);
    }
    Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
    scan.setFilter(exampleFilter);
    setScan(scan);
  } catch (IOException exception) {
    throw new RuntimeException("Failed to initialize.", exception);
  }
}
Example 2: testMinWithInvalidRange2
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test (timeout=300000)
public void testMinWithInvalidRange2() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[6]);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  BigDecimal min = null;
  try {
    min = aClient.min(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: start row equals stop row, an invalid range
  }
  assertEquals(null, min); // control should go to the catch block
}
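The range here is invalid because the start row equals the stop row: AggregationClient rejects the scan range up front (a non-empty start row must sort strictly before the stop row), so min(...) throws before any region is contacted and min stays null. Examples 3, 6 and 8 below exercise the same validation, with a different column interpreter or with a start row that sorts after the stop row.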
Example 3: testMinWithInvalidRange2
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test (timeout=300000)
public void testMinWithInvalidRange2() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[6]);
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  Long min = null;
  try {
    min = aClient.min(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: start row equals stop row, an invalid range
  }
  assertEquals(null, min); // control should go to the catch block
}
Example 4: initialize
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Override
protected void initialize(JobContext job) throws IOException {
  Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(
      job.getConfiguration()));
  TableName tableName = TableName.valueOf("exampleTable");
  // mandatory
  initializeTable(connection, tableName);
  byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"),
      Bytes.toBytes("columnB") };
  // optional
  Scan scan = new Scan();
  for (byte[] family : inputColumns) {
    scan.addFamily(family);
  }
  Filter exampleFilter = new RowFilter(CompareOp.EQUAL, new RegexStringComparator("aa.*"));
  scan.setFilter(exampleFilter);
  setScan(scan);
}
Example 5: testNumOfSplits
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Tests a MR scan using data skew auto-balance
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
public void testNumOfSplits(String ratio, int expectedNumOfSplits) throws IOException,
    InterruptedException, ClassNotFoundException {
  String jobName = "TestJobForNumOfSplits";
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  Scan scan = new Scan();
  scan.addFamily(INPUT_FAMILY);
  c.set("hbase.mapreduce.input.autobalance", "true");
  c.set("hbase.mapreduce.input.autobalance.maxskewratio", ratio);
  c.set(KEY_STARTROW, "");
  c.set(KEY_LASTROW, "");
  Job job = new Job(c, jobName);
  TableMapReduceUtil.initTableMapperJob(Bytes.toString(TABLE_NAME), scan, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  TableInputFormat tif = new TableInputFormat();
  tif.setConf(job.getConfiguration());
  Assert.assertEquals(new String(TABLE_NAME), new String(table.getTableName()));
  List<InputSplit> splits = tif.getSplits(job);
  Assert.assertEquals(expectedNumOfSplits, splits.size());
}
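A note on the two configuration keys (based on the auto-balance feature of TableInputFormat in this HBase line): hbase.mapreduce.input.autobalance turns on rebalancing of input splits by estimated region size, and hbase.mapreduce.input.autobalance.maxskewratio sets the skew threshold, so regions estimated at more than roughly ratio times the average size are broken into additional splits. The expectedNumOfSplits assertion checks the resulting split count.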
Example 6: testSumWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test (timeout=300000)
public void testSumWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[2]);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  BigDecimal sum = null;
  try {
    sum = aClient.sum(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: start row sorts after stop row, an invalid range
  }
  assertEquals(null, sum); // control should go to the catch block
}
Example 7: getRowOrBefore
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
public Result getRowOrBefore(Table table, byte[] row, byte[] family) throws IOException {
  long start = System.currentTimeMillis();
  Scan scan = new Scan();
  scan.addFamily(family);
  scan.setReversed(true);
  scan.setStartRow(row);
  scan.setCacheBlocks(false);
  scan.setCaching(1);
  scan.setSmall(true);
  ResultScanner scanner = table.getScanner(scan);
  Result ret = scanner.next();
  scanner.close();
  prevRowTotalTime += System.currentTimeMillis() - start;
  prevRowTotalCount++;
  return ret;
}
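This helper appears to emulate the old HTable#getRowOrBefore (deprecated and later removed from the client API) with a reversed scan: with setReversed(true), setStartRow(row) makes the scan begin at row and move backwards, so the single fetched result is the given row or the closest one before it, restricted to one family via addFamily. A hypothetical call, assuming the surrounding class's fields:

Result prev = getRowOrBefore(table, Bytes.toBytes("row-0042"), Bytes.toBytes("cf"));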
Example 8: testStdWithInvalidRange
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test (timeout=300000)
public void testStdWithInvalidRange() {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(ROWS[6]);
  scan.setStopRow(ROWS[1]);
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  Double std = null;
  try {
    std = aClient.std(TEST_TABLE, ci, scan);
  } catch (Throwable e) {
    // expected: start row sorts after stop row, an invalid range
  }
  assertEquals(null, std); // control should go to the catch block
}
Example 9: testBulkDeleteFamily
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
public void testBulkDeleteFamily() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteFamily");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the column family cf1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.FAMILY, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY2, QUALIFIER2).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
Example 10: buildScanner
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
private InternalScanner buildScanner(String keyPrefix, String value, HRegion r)
    throws IOException {
  // Defaults FilterList.Operator.MUST_PASS_ALL.
  FilterList allFilters = new FilterList();
  allFilters.addFilter(new PrefixFilter(Bytes.toBytes(keyPrefix)));
  // Only return rows where this column value exists in the row.
  SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("trans-tags"),
      Bytes.toBytes("qual2"), CompareOp.EQUAL, Bytes.toBytes(value));
  filter.setFilterIfMissing(true);
  allFilters.addFilter(filter);
  Scan scan = new Scan();
  scan.addFamily(Bytes.toBytes("trans-blob"));
  scan.addFamily(Bytes.toBytes("trans-type"));
  scan.addFamily(Bytes.toBytes("trans-date"));
  scan.addFamily(Bytes.toBytes("trans-tags"));
  scan.addFamily(Bytes.toBytes("trans-group"));
  scan.setFilter(allFilters);
  return r.getScanner(scan);
}
Example 11: testAvgWithValidRangeWithNoCQ
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test (timeout=300000)
public void testAvgWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  double avg = aClient.avg(TEST_TABLE, ci, scan);
  assertEquals(10.45, avg, 0.01);
}
Example 12: testReverseScanWithPadding
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test
public void testReverseScanWithPadding() throws Exception {
  byte[] terminator = new byte[] { -1 };
  byte[] row1 = Bytes.add(invert(Bytes.toBytes("a")), terminator);
  byte[] row2 = Bytes.add(invert(Bytes.toBytes("ab")), terminator);
  byte[] row3 = Bytes.add(invert(Bytes.toBytes("b")), terminator);
  Put put1 = new Put(row1);
  put1.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put2 = new Put(row2);
  put2.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  Put put3 = new Put(row3);
  put3.addColumn(cfName, cqName, HConstants.EMPTY_BYTE_ARRAY);
  region.put(put1);
  region.put(put2);
  region.put(put3);
  region.flush(true);
  Scan scan = new Scan();
  scan.setCacheBlocks(false);
  scan.setReversed(true);
  scan.setFilter(new FirstKeyOnlyFilter());
  scan.addFamily(cfName);
  RegionScanner scanner = region.getScanner(scan);
  List<Cell> res = new ArrayList<Cell>();
  int count = 1; // starts at 1: next() returns false while delivering the last row
  while (scanner.next(res)) {
    count++;
  }
  assertEquals(3, count);
}
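The invert helper used to build the row keys is not part of this excerpt. A plausible stand-in (an assumption, not the original code) complements every byte, which reverses the lexicographic order of the keys:

// Hypothetical reconstruction of the invert helper; not from the original test.
private static byte[] invert(byte[] bytes) {
  byte[] result = new byte[bytes.length];
  for (int i = 0; i < bytes.length; i++) {
    result[i] = (byte) ~bytes[i]; // bitwise complement flips the byte-wise sort order
  }
  return result;
}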
Example 13: testMinWithValidRangeWithNoCQ
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test (timeout=300000)
public void testMinWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  scan.setStartRow(HConstants.EMPTY_START_ROW);
  scan.setStopRow(HConstants.EMPTY_END_ROW);
  final ColumnInterpreter<Long, Long, EmptyMsg, LongMsg, LongMsg> ci =
      new LongColumnInterpreter();
  long min = aClient.min(TEST_TABLE, ci, scan);
  assertEquals(0, min);
}
Example 14: scanWithAllQualifiersOfFamiliyA
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
/**
 * Test the filter by adding all columns of family A in the scan. (OK)
 */
@Test
public void scanWithAllQualifiersOfFamiliyA() throws IOException {
  /* Given */
  Scan scan = new Scan();
  scan.addFamily(FAMILY_A);
  scan.setFilter(scanFilter);
  verify(scan);
}
Example 15: testSumWithValidRangeWithNoCQ
import org.apache.hadoop.hbase.client.Scan; // import the package/class the method depends on
@Test (timeout=300000)
public void testSumWithValidRangeWithNoCQ() throws Throwable {
  AggregationClient aClient = new AggregationClient(conf);
  Scan scan = new Scan();
  scan.addFamily(TEST_FAMILY);
  final ColumnInterpreter<BigDecimal, BigDecimal, EmptyMsg, BigDecimalMsg, BigDecimalMsg> ci =
      new BigDecimalColumnInterpreter();
  BigDecimal sum = aClient.sum(TEST_TABLE, ci, scan);
  assertEquals(new BigDecimal("209.00"), sum); // 190 + 19
}