This article collects typical usage examples of the Java class org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter. If you are wondering what SingleColumnValuePartitionFilter is for and how to use it, the curated examples below should help.
SingleColumnValuePartitionFilter belongs to the org.apache.hadoop.hbase.index.filter package. Ten code examples of the class are shown below, ordered by popularity by default.
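The filter compares a sub-part of the cell value, selected by a ValuePartition, rather than the whole value. As a rough orientation before the examples, this is the equivalent plain-Java extraction each partition type performs (a sketch inferred from the tests below, not the library's internal code):

byte[] value = "2ndFloor_solitaire_huawei_bangalore_karnataka".getBytes();
// SeparatorPartition("_", 3): compare against the 3rd "_"-separated token
// of the stored value -> "huawei" here.
String thirdToken = new String(value).split("_")[2];
// SpatialPartition(2, 3): compare against the 3 bytes starting at offset 2
// -> "rti" for the value "partition".
String slice = new String("partition".getBytes(), 2, 3);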
Example 1: doFiltersRestruct
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
// Rewrites a range-type SingleColumnValueFilter (LESS, LESS_OR_EQUAL, GREATER,
// GREATER_OR_EQUAL) into a SingleColumnRangeFilter, carrying over the value
// partition when the filter is a SingleColumnValuePartitionFilter; all other
// filters are handed to FilterGroupingWorker for grouping.
Filter doFiltersRestruct(Filter filter) {
if (filter instanceof SingleColumnValueFilter) {
ValuePartition vp = null;
if (filter instanceof SingleColumnValuePartitionFilter) {
vp = ((SingleColumnValuePartitionFilter) filter).getValuePartition();
}
SingleColumnValueFilter scvf = (SingleColumnValueFilter) filter;
if (scvf.getOperator().equals(CompareOp.LESS)
|| scvf.getOperator().equals(CompareOp.LESS_OR_EQUAL)
|| scvf.getOperator().equals(CompareOp.GREATER)
|| scvf.getOperator().equals(CompareOp.GREATER_OR_EQUAL)) {
return new SingleColumnRangeFilter(scvf.getFamily(), scvf.getQualifier(), vp, scvf
.getComparator().getValue(), scvf.getOperator(), null, null);
}
}
FilterGroupingWorker groupWorker = new FilterGroupingWorker();
return groupWorker.group(filter);
}
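In effect, a one-sided range condition comes back as a SingleColumnRangeFilter with an open upper or lower bound, which the index layer can serve as a range scan. A minimal sketch of a call (assuming the standard SingleColumnValueFilter constructor; "cf1"/"cq" are placeholder names):

// A plain ">= 10" condition on cf1:cq ...
SingleColumnValueFilter scvf = new SingleColumnValueFilter("cf1".getBytes(),
    "cq".getBytes(), CompareOp.GREATER_OR_EQUAL, "10".getBytes());
// ... is rewritten into a range filter; EQUAL and NOT_EQUAL conditions fall
// through to FilterGroupingWorker.group(...) instead.
Filter restructured = doFiltersRestruct(scvf);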
Example 2: selectBestFitAndPossibleIndicesForSCVF
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
// NOT_EQUAL and NO_OP conditions can never be answered from an index. For the
// remaining operators, capture the column details (including the ValuePartition
// when the filter is partition-aware) and pick the best-fitting index.
private FilterNode selectBestFitAndPossibleIndicesForSCVF(List<IndexSpecification> indices,
SingleColumnValueFilter filter) {
if (CompareOp.NOT_EQUAL == filter.getOperator() || CompareOp.NO_OP == filter.getOperator()) {
return new NoIndexFilterNode();
}
FilterColumnValueDetail detail = null;
if (filter instanceof SingleColumnValuePartitionFilter) {
SingleColumnValuePartitionFilter escvf = (SingleColumnValuePartitionFilter) filter;
detail =
new FilterColumnValueDetail(escvf.getFamily(), escvf.getQualifier(), escvf
.getComparator().getValue(), escvf.getValuePartition(), escvf.getOperator());
} else {
detail =
new FilterColumnValueDetail(filter.getFamily(), filter.getQualifier(), filter
.getComparator().getValue(), filter.getOperator());
}
return selectBestFitIndexForColumn(indices, detail);
}
Example 3: testSeparatorPartition
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void testSeparatorPartition() throws Exception {
Configuration conf = UTIL.getConfiguration();
String userTableName = "testSeparatorPartition";
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SeparatorPartition("_", 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
TableIndices indices = new TableIndices();
indices.addIndex(iSpec);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
admin.createTable(ihtd);
HTable table = new HTable(conf, "testSeparatorPartition");
byte[] value1 = "2ndFloor_solitaire_huawei_bangalore_karnataka".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(),
"7thFloor_solitaire_huawei_bangalore_karnataka".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "rrr_sss_hhh_bangalore_karnataka".getBytes());
table.put(p);
Scan scan = new Scan();
scan.setCaching(1);
scan.setFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.EQUAL, "huawei".getBytes(), vp));
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
assertEquals("It should get two seek points from index scanner.", 2, IndexRegionObserver
.getMultipleSeekPoints().size());
assertEquals("Overall result should have only 2 rows", 2, testRes.size());
}
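With SeparatorPartition("_", 3), the filter compares the third "_"-separated token: "huawei" for row and row2, but "hhh" for row3, so two seek points and two result rows are expected.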
Example 4: testSpatialPartition
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void testSpatialPartition() throws Exception {
Configuration conf = UTIL.getConfiguration();
String userTableName = "testSpatialPartition";
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SpatialPartition(2, 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
TableIndices indices = new TableIndices();
indices.addIndex(iSpec);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
admin.createTable(ihtd);
HTable table = new HTable(conf, "testSpatialPartition");
byte[] value1 = "helloworld".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "spatial".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "partition".getBytes());
table.put(p);
Scan scan = new Scan();
scan.setFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.EQUAL, "rti".getBytes(), vp));
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
assertEquals("It should get 1 seek point from index scanner.", 1, IndexRegionObserver
.getMultipleSeekPoints().size());
assertEquals("Overall result should have only 1 rows", 1, testRes.size());
}
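Here SpatialPartition(2, 3) compares the three bytes starting at offset 2: "llo" for "helloworld", "ati" for "spatial", and "rti" for "partition", so only row3 matches the EQUAL "rti" condition.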
Example 5: testSingleColumnValuePartitionFilterBySettingAsAttributeToScan
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void testSingleColumnValuePartitionFilterBySettingAsAttributeToScan() throws Exception {
Configuration conf = UTIL.getConfiguration();
String userTableName = "testSingleColumnValuePartitionFilterBySettingAsAttributeToScan";
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
TableIndices indices = new TableIndices();
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SeparatorPartition("_", 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
indices.addIndex(iSpec);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
admin.createTable(ihtd);
HTable table = new HTable(conf, userTableName);
byte[] value1 = "2ndFloor_solitaire_huawei_bangalore_karnataka".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(),
"7thFloor_solitaire_huawei_bangalore_karnataka".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "rrr_sss_hhh_bangalore_karnataka".getBytes());
table.put(p);
Scan scan = new Scan();
SingleIndexExpression singleIndexExpression = new SingleIndexExpression("idx1");
byte[] value = "huawei".getBytes();
Column column = new Column("cf1".getBytes(), "cq".getBytes(), vp);
EqualsExpression equalsExpression = new EqualsExpression(column, value);
singleIndexExpression.addEqualsExpression(equalsExpression);
scan.setAttribute(Constants.INDEX_EXPRESSION, IndexUtils.toBytes(singleIndexExpression));
scan.setFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.EQUAL, "huawei".getBytes(), vp));
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
assertEquals("It should get two seek points from index scanner.", 2, IndexRegionObserver
.getMultipleSeekPoints().size());
assertTrue("Overall result should have only 2 rows", testRes.size() == 2);
}
Example 6: testSeparatorPartition
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void testSeparatorPartition() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Configuration conf = UTIL.getConfiguration();
String userTableName = "testSeparatorPartition";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SeparatorPartition("_", 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
ihtd.addIndex(iSpec);
admin.createTable(ihtd);
HTable table = new HTable(conf, "testSeparatorPartition");
byte[] value1 = "2ndFloor_solitaire_huawei_bangalore_karnataka".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(),
"7thFloor_solitaire_huawei_bangalore_karnataka".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "rrr_sss_hhh_bangalore_karnataka".getBytes());
table.put(p);
Scan scan = new Scan();
scan.setFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.EQUAL, "huawei".getBytes(), vp));
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 2, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertTrue("Overall result should have only 2 rows", testRes.size() == 2);
}
Example 7: testSpatialPartition
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void testSpatialPartition() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Configuration conf = UTIL.getConfiguration();
String userTableName = "testSpatialPartition";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SpatialPartition(2, 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
ihtd.addIndex(iSpec);
admin.createTable(ihtd);
HTable table = new HTable(conf, "testSpatialPartition");
byte[] value1 = "helloworld".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "spatial".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "partition".getBytes());
table.put(p);
Scan scan = new Scan();
scan.setFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.LESS_OR_EQUAL, "rti".getBytes(), vp));
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 3, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertTrue("Overall result should have only 1 rows", testRes.size() == 3);
}
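Unlike the EQUAL variant in Example 4, the LESS_OR_EQUAL "rti" condition here is satisfied by all three slices ("llo", "ati", and "rti" are all <= "rti" byte-wise), so three seek points and three result rows are expected.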
Example 8: testSpatialPartitionIfMulitplePartsOfValueAreIndexedByDifferentIndicesOnSameColumn
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void testSpatialPartitionIfMulitplePartsOfValueAreIndexedByDifferentIndicesOnSameColumn()
throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Configuration conf = UTIL.getConfiguration();
String userTableName =
"testSpatialPartitionIfMulitplePartsOfValueAreIndexedByDifferentIndicesOnSameColumn";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SpatialPartition(2, 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
ihtd.addIndex(iSpec);
ValuePartition vp2 = new SpatialPartition(5, 2);
iSpec = new IndexSpecification("idx2");
iSpec.addIndexColumn(hcd, "cq", vp2, ValueType.String, 200);
ihtd.addIndex(iSpec);
admin.createTable(ihtd);
HTable table = new HTable(conf, userTableName);
byte[] value1 = "helloworldmultiple".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "spatialmultiple".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "partitionmultiple".getBytes());
table.put(p);
FilterList masterFilter = new FilterList(Operator.MUST_PASS_ALL);
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.EQUAL, "rti".getBytes(), vp));
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.GREATER_OR_EQUAL, "ti".getBytes(), vp2));
Scan scan = new Scan();
scan.setFilter(masterFilter);
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 1, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertTrue("Overall result should have only 1 rows", testRes.size() == 1);
masterFilter = new FilterList(Operator.MUST_PASS_ONE);
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.LESS, "rti".getBytes(), vp));
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.GREATER, "ti".getBytes(), vp2));
scan = new Scan();
scan.setFilter(masterFilter);
i = 0;
scanner = table.getScanner(scan);
testRes = new ArrayList<Result>();
result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 3, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertTrue("Overall result should have only 2 rows", testRes.size() == 2);
}
Example 9: testSeparatorPartitionIfMulitplePartsOfValueAreIndexedByDifferentIndicesOnSameColumn
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void
testSeparatorPartitionIfMulitplePartsOfValueAreIndexedByDifferentIndicesOnSameColumn()
throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Configuration conf = UTIL.getConfiguration();
String userTableName =
"testSeparatorPartitionIfMulitplePartsOfValueAreIndexedByDifferentIndicesOnSameColumn";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SeparatorPartition("--", 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
ihtd.addIndex(iSpec);
ValuePartition vp2 = new SeparatorPartition("--", 2);
iSpec = new IndexSpecification("idx2");
iSpec.addIndexColumn(hcd, "cq", vp2, ValueType.String, 200);
ihtd.addIndex(iSpec);
admin.createTable(ihtd);
HTable table = new HTable(conf, userTableName);
byte[] value1 = "hello--world--multiple--1".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "spatial--partition--multiple".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "partition--by--separator--multiple".getBytes());
table.put(p);
FilterList masterFilter = new FilterList(Operator.MUST_PASS_ALL);
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.EQUAL, "multiple".getBytes(), vp));
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.GREATER_OR_EQUAL, "by".getBytes(), vp2));
Scan scan = new Scan();
scan.setFilter(masterFilter);
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 2, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertTrue("Overall result should have only 1 rows", testRes.size() == 2);
masterFilter = new FilterList(Operator.MUST_PASS_ONE);
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.GREATER_OR_EQUAL, "person".getBytes(), vp));
masterFilter.addFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.LESS, "multiple".getBytes(), vp2));
scan = new Scan();
scan.setFilter(masterFilter);
i = 0;
scanner = table.getScanner(scan);
testRes = new ArrayList<Result>();
result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 3, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertTrue("Overall result should have only 1 rows", testRes.size() == 1);
}
Example 10: testSingleColumnValuePartitionFilterBySettingAsAttributeToScan
import org.apache.hadoop.hbase.index.filter.SingleColumnValuePartitionFilter; // import the required class
@Test(timeout = 180000)
public void testSingleColumnValuePartitionFilterBySettingAsAttributeToScan() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
Configuration conf = UTIL.getConfiguration();
String userTableName = "testSingleColumnValuePartitionFilterBySettingAsAttributeToScan";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
ValuePartition vp = new SeparatorPartition("_", 3);
IndexSpecification iSpec = new IndexSpecification("idx1");
iSpec.addIndexColumn(hcd, "cq", vp, ValueType.String, 200);
ihtd.addIndex(iSpec);
admin.createTable(ihtd);
HTable table = new HTable(conf, userTableName);
byte[] value1 = "2ndFloor_solitaire_huawei_bangalore_karnataka".getBytes();
Put p = new Put("row".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), value1);
table.put(p);
p = new Put("row2".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(),
"7thFloor_solitaire_huawei_bangalore_karnataka".getBytes());
table.put(p);
p = new Put("row3".getBytes());
p.add("cf1".getBytes(), "cq".getBytes(), "rrr_sss_hhh_bangalore_karnataka".getBytes());
table.put(p);
Scan scan = new Scan();
SingleIndexExpression singleIndexExpression = new SingleIndexExpression("idx1");
byte[] value = "huawei".getBytes();
Column column = new Column("cf1".getBytes(), "cq".getBytes(), vp);
EqualsExpression equalsExpression = new EqualsExpression(column, value);
singleIndexExpression.addEqualsExpression(equalsExpression);
scan.setAttribute(Constants.INDEX_EXPRESSION, IndexUtils.toBytes(singleIndexExpression));
scan.setFilter(new SingleColumnValuePartitionFilter(hcd.getName(), "cq".getBytes(),
CompareOp.EQUAL, "huawei".getBytes(), vp));
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 2, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertTrue("Overall result should have only 2 rows", testRes.size() == 2);
}