This article collects typical usage examples of the Java class org.apache.hadoop.hbase.index.IndexSpecification. If you are wondering what the IndexSpecification class is for, how to use it, or what real usage looks like, the curated examples below should help.
The IndexSpecification class belongs to the org.apache.hadoop.hbase.index package. Fifteen code examples of the class are shown below, sorted by popularity by default.
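Before the individual examples, here is a minimal sketch of the typical lifecycle, assembled from Examples 6 and 7 below: declare an index, wrap it in a TableIndices container, and attach it to the table descriptor before creating the table. The table, family, and qualifier names are hypothetical, and the import paths of TableIndices, ValueType, and Constants are assumed to live in the same org.apache.hadoop.hbase.index package as IndexSpecification.
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf("userTable")); // hypothetical table
HColumnDescriptor hcd = new HColumnDescriptor("cf");
ihtd.addFamily(hcd);
IndexSpecification iSpec = new IndexSpecification("idx_name");
iSpec.addIndexColumn(hcd, "cq", ValueType.String, 10); // index cf:cq as a 10-byte string value
TableIndices indices = new TableIndices();
indices.addIndex(iSpec);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray()); // serialize index metadata into the descriptor
admin.createTable(ihtd); // admin is an HBaseAdmin, as in the tests below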
Example 1: prepareIndexMutations
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
private void prepareIndexMutations(List<IndexSpecification> indices, HRegion userRegion,
Mutation mutation, String tableName, HRegion indexRegion) throws IOException {
IndexEdits indexEdits = threadLocal.get();
if (mutation instanceof Put) {
for (IndexSpecification index : indices) {
// Handle each of the indices
Mutation indexPut = IndexUtils.prepareIndexPut((Put) mutation, index, indexRegion);
if (null != indexPut) {
// The index put can be null when the user table mutation does not
// contain all of the indexed column values.
indexEdits.add(indexPut);
}
}
} else if (mutation instanceof Delete) {
Collection<? extends Mutation> indexDeletes =
prepareIndexDeletes((Delete) mutation, userRegion, indices, indexRegion);
indexEdits.addAll(indexDeletes);
} else {
// TODO : Log or throw exception
}
}
Example 2: evalFilterForIndexSelection
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
FilterNode evalFilterForIndexSelection(Filter filter, List<IndexSpecification> indices) {
if (filter instanceof FilterList) {
FilterList fList = (FilterList) filter;
GroupingCondition condition =
(fList.getOperator() == Operator.MUST_PASS_ALL) ? GroupingCondition.AND
: GroupingCondition.OR;
NonLeafFilterNode nonLeafFilterNode = new NonLeafFilterNode(condition);
List<Filter> filters = fList.getFilters();
for (Filter fltr : filters) {
FilterNode node = evalFilterForIndexSelection(fltr, indices);
nonLeafFilterNode.addFilterNode(node);
}
return handleNonLeafFilterNode(nonLeafFilterNode);
} else if (filter instanceof SingleColumnValueFilter) {
// Check whether an index is available for this column
return selectBestFitAndPossibleIndicesForSCVF(indices, (SingleColumnValueFilter) filter);
} else if (filter instanceof SingleColumnRangeFilter) {
return selectBestFitAndPossibleIndicesForSCRF(indices, (SingleColumnRangeFilter) filter);
}
return new NoIndexFilterNode();
}
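As a usage sketch (not from the source; the family, qualifier, and value names are made up), a caller would hand the evaluator a filter tree such as the following, where a MUST_PASS_ALL list becomes an AND grouping node and a MUST_PASS_ONE list becomes an OR grouping node:
FilterList fList = new FilterList(Operator.MUST_PASS_ALL);
fList.addFilter(new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q1"),
    CompareOp.EQUAL, Bytes.toBytes("v1")));
fList.addFilter(new SingleColumnValueFilter(Bytes.toBytes("cf"), Bytes.toBytes("q2"),
    CompareOp.GREATER, Bytes.toBytes("v2")));
// Each SingleColumnValueFilter leaf is matched against the available indices;
// unsupported filter types fall through to a NoIndexFilterNode.
FilterNode root = evalFilterForIndexSelection(fList, indices);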
Example 3: handleORCondition
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
private FilterNode handleORCondition(NonLeafFilterNode nonLeafFilterNode) {
Iterator<FilterNode> nonLeafFilterNodeItr = nonLeafFilterNode.getFilterNodes().iterator();
while (nonLeafFilterNodeItr.hasNext()) {
FilterNode filterNode = nonLeafFilterNodeItr.next();
if (filterNode instanceof IndexFilterNode) {
FilterColumnValueDetail filterColumnValueDetail =
((IndexFilterNode) filterNode).getFilterColumnValueDetail();
IndexSpecification indexToUse = ((IndexFilterNode) filterNode).getBestIndex();
nonLeafFilterNode.addIndicesToUse(filterColumnValueDetail, indexToUse);
nonLeafFilterNodeItr.remove();
} else if (filterNode instanceof PossibleIndexFilterNode
|| filterNode instanceof NoIndexFilterNode) {
// The moment an OR condition contains a column for which no usable index exists,
// the entire OR node becomes non-indexed.
return new NoIndexFilterNode();
}
// A NonLeafFilterNode under the OR node needs to be kept as it is.
}
return nonLeafFilterNode;
}
Example 4: selectBestFitAndPossibleIndicesForSCVF
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
private FilterNode selectBestFitAndPossibleIndicesForSCVF(List<IndexSpecification> indices,
SingleColumnValueFilter filter) {
if (CompareOp.NOT_EQUAL == filter.getOperator() || CompareOp.NO_OP == filter.getOperator()) {
return new NoIndexFilterNode();
}
FilterColumnValueDetail detail = null;
if (filter instanceof SingleColumnValuePartitionFilter) {
SingleColumnValuePartitionFilter escvf = (SingleColumnValuePartitionFilter) filter;
detail =
new FilterColumnValueDetail(escvf.getFamily(), escvf.getQualifier(), escvf
.getComparator().getValue(), escvf.getValuePartition(), escvf.getOperator());
} else {
detail =
new FilterColumnValueDetail(filter.getFamily(), filter.getQualifier(), filter
.getComparator().getValue(), filter.getOperator());
}
return selectBestFitIndexForColumn(indices, detail);
}
Example 5: testAddIndexForTable
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
public void testAddIndexForTable() throws Exception {
IndexManager im = IndexManager.getInstance();
assertNotNull("Index Manager should not be null.", im);
List<IndexSpecification> indexList = new ArrayList<IndexSpecification>(1);
IndexSpecification iSpec = new IndexSpecification("index_name");
iSpec.addIndexColumn(new HColumnDescriptor("cf"), "cq", null, 10);
indexList.add(iSpec);
im.addIndexForTable("index_name", indexList);
indexList = im.getIndicesForTable("index_name");
assertEquals("Index name should be equal with actual value.", "index_name", indexList.get(0)
.getName());
assertTrue("Column qualifier state mismatch.",
indexList.get(0).getIndexColumns().contains(new ColumnQualifier("cf", "cq", null, 10)));
}
Example 6: testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtBothTypeAndLength
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test(timeout = 180000)
public void testPreCreateShouldNotBeSuccessfulIfIndicesAreNotSameAtBothTypeAndLength()
throws IOException, KeeperException, InterruptedException {
String userTableName = "testNotConsisIndex4";
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
HColumnDescriptor hcd = new HColumnDescriptor("col");
IndexSpecification iSpec1 = new IndexSpecification("Index1");
iSpec1.addIndexColumn(hcd, "q1", ValueType.String, 10);
iSpec1.addIndexColumn(hcd, "q2", ValueType.String, 10);
ihtd.addFamily(hcd);
IndexSpecification iSpec2 = new IndexSpecification("Index2");
iSpec2.addIndexColumn(hcd, "q1", ValueType.Int, 10);
iSpec2.addIndexColumn(hcd, "q2", ValueType.String, 7);
TableIndices indices = new TableIndices();
indices.addIndex(iSpec1);
indices.addIndex(iSpec2);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
boolean returnVal = false;
try {
admin.createTable(ihtd);
fail("IOException should be thrown");
} catch (IOException e) {
returnVal = true;
}
Assert.assertTrue(returnVal);
}
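Note the intended failure: the two specifications disagree on the type of col:q1 (String vs Int) and on the length of col:q2 (10 vs 7), so the pre-create hook rejects the table with an IOException.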
Example 7: testPreCreateShouldBeSuccessfulIfIndicesAreSame
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test(timeout = 180000)
public void testPreCreateShouldBeSuccessfulIfIndicesAreSame() throws IOException,
KeeperException, InterruptedException {
String userTableName = "testConsistIndex";
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
HColumnDescriptor hcd = new HColumnDescriptor("col");
IndexSpecification iSpec1 = new IndexSpecification("Index1");
iSpec1.addIndexColumn(hcd, "q1", ValueType.String, 10);
ihtd.addFamily(hcd);
IndexSpecification iSpec2 = new IndexSpecification("Index2");
iSpec2.addIndexColumn(hcd, "q1", ValueType.String, 10);
TableIndices indices = new TableIndices();
indices.addIndex(iSpec1);
indices.addIndex(iSpec2);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
try {
admin.createTable(ihtd);
} catch (IOException e) {
fail("Exception should not be thrown");
}
}
Example 8: testIndexPutWithOffsetAndLength
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithOffsetAndLength() throws IOException {
Path basedir = new Path(DIR + "TestIndexPut");
Configuration conf = TEST_UTIL.getConfiguration();
HTableDescriptor htd =
new HTableDescriptor(TableName.valueOf("testIndexPutWithOffsetAndLength"));
HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
IndexSpecification spec = new IndexSpecification("index");
// SpatialPartition(20, 2): index only the 2 bytes at offset 20 of the column value ("IJ" below)
spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", new SpatialPartition(20, 2),
ValueType.String, 18);
byte[] value1 = "AB---CD---EF---GH---IJ---KL---MN---OP---".getBytes();
Put p = new Put("row".getBytes());
p.add("col".getBytes(), "ql1".getBytes(), value1);
Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
byte[] indexRowKey = indexPut.getRow();
byte[] actualResult = new byte[2];
// In these tests the indexed value bytes begin at offset 22 of the index row key.
System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
byte[] expectedResult = new byte[2];
System.arraycopy("IJ".getBytes(), 0, expectedResult, 0, "IJ".getBytes().length);
Assert.assertTrue(Bytes.equals(actualResult, expectedResult));
}
Example 9: testIndexPutwithPositiveIntDataTypes
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutwithPositiveIntDataTypes() throws IOException {
Path basedir = new Path(DIR + "TestIndexPut");
Configuration conf = TEST_UTIL.getConfiguration();
HTableDescriptor htd =
new HTableDescriptor(TableName.valueOf("testIndexPutwithPositiveIntDataTypes"));
HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
IndexSpecification spec = new IndexSpecification("index");
spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Int, 4);
spec.addIndexColumn(new HColumnDescriptor("col"), "ql2", ValueType.Float, 4);
byte[] value1 = Bytes.toBytes(1000);
Put p = new Put("row".getBytes());
p.add("col".getBytes(), "ql1".getBytes(), value1);
Put indexPut1 = IndexUtils.prepareIndexPut(p, spec, region);
int a = 1000;
// Flip the sign bit so that lexicographic byte order matches numeric order.
byte[] expectedResult = Bytes.toBytes(a ^ (1 << 31));
byte[] actualResult = new byte[4];
byte[] indexRowKey = indexPut1.getRow();
System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
Example 10: testIndexPutWithNegativeIntDataTypes
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithNegativeIntDataTypes() throws IOException {
Path basedir = new Path(DIR + "TestIndexPut");
Configuration conf = TEST_UTIL.getConfiguration();
HTableDescriptor htd =
new HTableDescriptor(TableName.valueOf("testIndexPutWithNegativeIntDataTypes"));
HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
IndexSpecification spec = new IndexSpecification("index");
spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Int, 4);
spec.addIndexColumn(new HColumnDescriptor("col"), "ql2", ValueType.Float, 4);
byte[] value1 = Bytes.toBytes(-2562351);
Put p = new Put("row".getBytes());
p.add("col".getBytes(), "ql1".getBytes(), value1);
Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
int a = -2562351;
byte[] expectedResult = Bytes.toBytes(a ^ (1 << 31));
byte[] actualResult = new byte[4];
byte[] indexRowKey = indexPut.getRow();
System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
Example 11: testIndexPutWithLongDataTypes
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test(timeout = 180000)
public void testIndexPutWithLongDataTypes() throws IOException {
Path basedir = new Path(DIR + "TestIndexPut");
Configuration conf = TEST_UTIL.getConfiguration();
HTableDescriptor htd = new HTableDescriptor("testIndexPutWithNegativeIntDataTypes");
HRegionInfo info = new HRegionInfo(htd.getTableName(), "ABC".getBytes(), "BBB".getBytes(), false);
HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
IndexSpecification spec = new IndexSpecification("index");
spec.addIndexColumn(new HColumnDescriptor("col"), "ql1", ValueType.Long, 4);
byte[] value1 = Bytes.toBytes(-2562351L);
Put p = new Put("row".getBytes());
p.add("col".getBytes(), "ql1".getBytes(), value1);
Put indexPut = IndexUtils.prepareIndexPut(p, spec, region);
long a = -2562351L;
byte[] expectedResult = Bytes.toBytes(a ^ (1L << 63));
byte[] actualResult = new byte[8];
byte[] indexRowKey = indexPut.getRow();
System.arraycopy(indexRowKey, 22, actualResult, 0, actualResult.length);
Assert.assertTrue(Bytes.equals(expectedResult, actualResult));
}
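The XOR in the last three tests is the usual sign-bit flip that makes two's-complement numbers sort correctly as unsigned bytes: flipping the most significant bit (1 << 31 for int, 1L << 63 for long) moves all negative values below all positive ones in lexicographic byte order, which is exactly what a sorted index row key requires. A stand-alone illustration (not from the source):
int neg = -2562351;
int pos = 1000;
// Without the flip, the raw encoding of a negative int compares GREATER than
// that of a positive int, because the set sign bit is the first bit compared.
byte[] encNeg = Bytes.toBytes(neg ^ (1 << 31));
byte[] encPos = Bytes.toBytes(pos ^ (1 << 31));
assert Bytes.compareTo(encNeg, encPos) < 0; // byte order now matches numeric order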
Example 12: testSingleIndexExpressionWithOneEqualsExpression
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test
public void testSingleIndexExpressionWithOneEqualsExpression() throws Exception {
String indexName = "idx1";
SingleIndexExpression singleIndexExpression = new SingleIndexExpression(indexName);
byte[] value = "1".getBytes();
Column column = new Column(FAMILY1, QUALIFIER1);
EqualsExpression equalsExpression = new EqualsExpression(column, value);
singleIndexExpression.addEqualsExpression(equalsExpression);
Scan scan = new Scan();
scan.setAttribute(Constants.INDEX_EXPRESSION, IndexUtils.toBytes(singleIndexExpression));
Filter filter = new SingleColumnValueFilter(FAMILY1, QUALIFIER1, CompareOp.EQUAL, value);
scan.setFilter(filter);
ScanFilterEvaluator evaluator = new ScanFilterEvaluator();
List<IndexSpecification> indices = new ArrayList<IndexSpecification>();
IndexSpecification index = new IndexSpecification(indexName);
HColumnDescriptor colDesc = new HColumnDescriptor(FAMILY1);
index.addIndexColumn(colDesc, COL1, ValueType.String, 10);
indices.add(index);
HRegion region =
initHRegion(tableName.getBytes(), null, null,
"testSingleIndexExpressionWithOneEqualsExpression", TEST_UTIL.getConfiguration(), FAMILY1);
IndexRegionScanner scanner = evaluator.evaluate(scan, indices, new byte[0], region, tableName);
// TODO add assertions
}
Example 13: testPostOpenCoprocessor
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Test(timeout = 180000)
public void testPostOpenCoprocessor() throws IOException, KeeperException, InterruptedException {
String userTableName = "testPostOpenCoprocessor";
HTableDescriptor ihtd =
TestUtils.createIndexedHTableDescriptor(userTableName, "col", "Index1", "col", "ql");
admin.createTable(ihtd);
// Check the number of indices
List<IndexSpecification> list = IndexManager.getInstance().getIndicesForTable(userTableName);
Assert.assertEquals(1, list.size());
// Check the index name
boolean bool = false;
for (IndexSpecification e : list) {
if (e.getName().equals("Index1")) bool = true;
}
Assert.assertTrue(bool);
}
Example 14: getIndex
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
/**
* @param tableName the user table name
* @param indexName the index name
* @return the matching index specification, or null if the table has no index with this name
*/
public IndexSpecification getIndex(String tableName, byte[] indexName) {
Map<byte[], IndexSpecification> indices = this.tableIndexMap.get(tableName);
if (indices != null) {
return indices.get(indexName);
}
return null;
}
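Since the inner map is keyed by byte[], a plain HashMap would not work here: Java arrays use identity-based equals/hashCode. A sketch of the presumed declaration (the TreeMap with Bytes.BYTES_COMPARATOR is an assumption, but it is the standard HBase convention for byte[] keys):
Map<byte[], IndexSpecification> indices =
    new TreeMap<byte[], IndexSpecification>(Bytes.BYTES_COMPARATOR);
indices.put(Bytes.toBytes("idx1"), new IndexSpecification("idx1"));
IndexSpecification found = indices.get(Bytes.toBytes("idx1")); // lookup by content succeeds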
Example 15: preWALWrite
import org.apache.hadoop.hbase.index.IndexSpecification; // import the required package/class
@Override
public boolean preWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx, HRegionInfo info,
HLogKey logKey, WALEdit logEdit) throws IOException {
TableName tableName = info.getTable();
if (IndexUtils.isCatalogOrSystemTable(tableName) || IndexUtils.isIndexTable(tableName)) {
return true;
}
List<IndexSpecification> indices = indexManager.getIndicesForTable(tableName.getNameAsString());
if (indices != null && !indices.isEmpty()) {
LOG.trace("Entering preWALWrite for the table " + tableName);
String indexTableName = IndexUtils.getIndexTableName(tableName);
IndexEdits iEdits = IndexRegionObserver.threadLocal.get();
WALEdit indexWALEdit = iEdits.getWALEdit();
// The size will be 0 when none of the user-table mutations need to be indexed,
// or when writing to the WAL is disabled for the mutations.
if (indexWALEdit.getKeyValues().size() == 0) {
return true;
}
LOG.trace("Adding indexWALEdits into WAL for table " + tableName);
HRegion indexRegion = iEdits.getRegion();
// The TS of all KVs within the WALEdit will be the same, so consider only the first one.
long time = indexWALEdit.getKeyValues().get(0).getTimestamp();
indexRegion.getLog().appendNoSync(indexRegion.getRegionInfo(),
TableName.valueOf(indexTableName), indexWALEdit, logKey.getClusterIds(), time,
indexRegion.getTableDesc(), indexRegion.getSequenceId(), true, HConstants.NO_NONCE,
HConstants.NO_NONCE);
LOG.trace("Exiting preWALWrite for the table " + tableName);
}
return true;
}