This page collects typical usage examples of the Java method org.apache.hadoop.hbase.index.util.ByteArrayBuilder.position. If you have been wondering what ByteArrayBuilder.position does, how to call it, and what real-world usage looks like, the curated examples below should help. You can also explore the enclosing class org.apache.hadoop.hbase.index.util.ByteArrayBuilder for more context.
The following shows 6 code examples of ByteArrayBuilder.position, ordered by popularity by default.
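The examples below exercise only a handful of ByteArrayBuilder operations: allocate, put, the position getter/setter, and array. As a mental model (a minimal sketch inferred from the calls in these examples, not the actual class source), the builder behaves like a fixed-size, zero-filled byte array with a write cursor:
// Hypothetical sketch of the ByteArrayBuilder contract, inferred from the
// examples on this page; the real class lives in
// org.apache.hadoop.hbase.index.util and may differ in detail.
public final class ByteArrayBuilderSketch {
  private final byte[] buf; // zero-filled on allocation
  private int pos;          // write cursor

  private ByteArrayBuilderSketch(int capacity) {
    this.buf = new byte[capacity];
  }

  public static ByteArrayBuilderSketch allocate(int capacity) {
    return new ByteArrayBuilderSketch(capacity);
  }

  // Copy src at the cursor and advance the cursor past it.
  public void put(byte[] src) {
    System.arraycopy(src, 0, buf, pos, src.length);
    pos += src.length;
  }

  // Current cursor position.
  public int position() {
    return pos;
  }

  // Move the cursor; skipped bytes stay 0x00, which is how the examples
  // below encode fixed-width padding without writing it explicitly.
  public void position(int newPos) {
    pos = newPos;
  }

  public byte[] array() {
    return buf;
  }
}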
Example 1: createCommonKeyForIndex
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the package/class this method depends on
private byte[] createCommonKeyForIndex(byte[] regionStartKey, byte[] indexName) {
// Format for index table rowkey: [start key of the index region] + [one 0 byte] +
// [index name] + [padding up to the max index name length] + ....
int commonKeyLength = regionStartKey.length + 1 + IndexUtils.getMaxIndexNameLength();
ByteArrayBuilder builder = ByteArrayBuilder.allocate(commonKeyLength);
// Add the start key for the index region, then skip a single 0 byte.
builder.put(regionStartKey);
builder.position(builder.position() + 1);
// Adding the index name and the padding needed
builder.put(indexName);
// No need to add the padding bytes specifically. In the array all the bytes will be 0s.
return builder.array();
}
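A quick illustration of the position-skip idiom, using the sketch class from the introduction; the start key and index name are made-up values:
ByteArrayBuilderSketch b = ByteArrayBuilderSketch.allocate(7 + 1 + 4);
b.put(Bytes.toBytes("region1")); // region start key (7 bytes)
b.position(b.position() + 1);    // skip one byte: the 0x00 separator
b.put(Bytes.toBytes("idx2"));    // index name (assuming a max name length of 4 here)
byte[] key = b.array();          // "region1" + 0x00 + "idx2"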
Example 2: copyColumnValueToKey
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the package/class this method depends on
private void copyColumnValueToKey(ByteArrayBuilder builder, byte[] colValue, int maxValueLength,
ValueType valueType) {
colValue = IndexUtils.changeValueAccToDataType(colValue, valueType);
builder.put(colValue);
int paddingLength = maxValueLength - colValue.length;
builder.position(builder.position() + paddingLength);
}
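For example, with a 10-byte value slot and the 5-byte value "apple", the method writes the value and then jumps the cursor to the end of the slot, leaving the remaining bytes as 0x00 padding. A sketch under the same assumptions as the introduction:
ByteArrayBuilderSketch b = ByteArrayBuilderSketch.allocate(10);
byte[] colValue = Bytes.toBytes("apple");          // 5 bytes
b.put(colValue);
b.position(b.position() + (10 - colValue.length)); // 5 padding bytes stay 0x00
// b.array() now holds "apple" followed by five zero bytes.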
Example 3: getIndexDeletes
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the package/class this method depends on
private static Collection<Delete> getIndexDeletes(List<IndexSpecification> indexSpecs,
HRegion userRegion, HRegion indexRegion, Cell deleteKV) throws IOException {
Collection<Delete> indexDeletes = new LinkedHashSet<Delete>();
List<IndexSpecification> indicesToUpdate = new LinkedList<IndexSpecification>();
Multimap<Long, Cell> groupedKV =
doGetAndGroupByTS(indexSpecs, userRegion, deleteKV, indicesToUpdate);
// There can be multiple index kvs for each user kv
// So, prepare all resultant index delete kvs for this user delete kv
for (Entry<Long, Collection<Cell>> entry : groupedKV.asMap().entrySet()) {
for (IndexSpecification index : indicesToUpdate) {
ByteArrayBuilder indexRow =
IndexUtils.getIndexRowKeyHeader(index, indexRegion.getStartKey(), deleteKV.getRow());
boolean update = false;
for (ColumnQualifier cq : index.getIndexColumns()) {
Cell kvFound = null;
for (Cell kv : entry.getValue()) {
if (Bytes.equals(cq.getColumnFamily(), kv.getFamily())
&& Bytes.equals(cq.getQualifier(), kv.getQualifier())) {
kvFound = kv;
update = true;
break;
}
}
if (kvFound == null) {
indexRow.position(indexRow.position() + cq.getMaxValueLength());
} else {
IndexUtils.updateRowKeyForKV(cq, kvFound, indexRow);
}
}
if (update) {
// Append the actual row key at the end of the index row key.
indexRow.put(deleteKV.getRow());
Delete idxDelete = new Delete(indexRow.array());
if (((KeyValue) deleteKV).isDeleteType()) {
idxDelete
.deleteColumn(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, entry.getKey());
} else {
idxDelete.deleteFamily(Constants.IDX_COL_FAMILY, entry.getKey());
}
indexDeletes.add(idxDelete);
}
}
}
return indexDeletes;
}
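The key detail here is the kvFound == null branch: a missing indexed column does not shrink the row key. Its fixed-width slot is simply skipped and stays zero-filled, so every index row key keeps the same byte layout. A sketch with two 10-byte column slots (illustrative values, using the sketch class from the introduction):
ByteArrayBuilderSketch row = ByteArrayBuilderSketch.allocate(20); // two 10-byte slots
row.put(Bytes.toBytes("apple"));   // c1 found: write its value...
row.position(10);                  // ...and pad out to the slot boundary
row.position(row.position() + 10); // c2 missing: skip its whole slot
// Row keys stay byte-aligned regardless of which columns matched.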
Example 4: testFalsePositiveCases
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the package/class this method depends on
@Test(timeout = 180000)
public void testFalsePositiveCases() throws Exception {
final Configuration conf = UTIL.getConfiguration();
String userTableName = "testFalsePositiveCases";
HTableDescriptor ihtd = new HTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
TableIndices indices = new TableIndices();
IndexSpecification indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c1" }, "idx2");
indices.addIndex(indexSpecification);
indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c2" }, "idx3");
indices.addIndex(indexSpecification);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
admin.createTable(ihtd);
HTable table = new HTable(conf, "testFalsePositiveCases");
HTable idx_table = new HTable(conf, "testFalsePositiveCases_idx");
ByteArrayBuilder byteArray = new ByteArrayBuilder(33);
byteArray.put(new byte[1]);
byteArray.put(Bytes.toBytes("idx2"));
byteArray.put(new byte[14]);
byteArray.put(Bytes.toBytes("apple"));
byteArray.put(new byte[5]);
int offset = byteArray.position();
byteArray.put(Bytes.toBytes("row1"));
ByteArrayBuilder value = new ByteArrayBuilder(4);
value.put(Bytes.toBytes((short) byteArray.array().length));
value.put(Bytes.toBytes((short) offset));
Put p = new Put(byteArray.array());
p.add(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, value.array());
idx_table.put(p);
SingleColumnValueFilter filter =
new SingleColumnValueFilter("cf1".getBytes(), "c1".getBytes(), CompareOp.EQUAL,
"apple".getBytes());
SingleColumnValueFilter filter1 =
new SingleColumnValueFilter("cf1".getBytes(), "c2".getBytes(), CompareOp.EQUAL,
"bat".getBytes());
FilterList masterFilter = new FilterList(Operator.MUST_PASS_ONE);
masterFilter.addFilter(filter1);
masterFilter.addFilter(filter);
Scan scan = new Scan();
scan.setFilter(masterFilter);
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
assertEquals("It should get two seek points from index scanner.", 1, IndexRegionObserver
.getMultipleSeekPoints().size());
assertEquals("Overall result should have only 2 rows", 0, testRes.size());
}
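Tracing the puts above gives the following 33-byte key layout. The max index name length of 18 is an assumption (consistent with the 4-byte name plus 14 bytes of padding); the max value length of 10 matches the index specification:
// Offset  Length  Content
//  0       1      0x00 (separator byte; the region start key is empty here)
//  1       4      "idx2" (index name)
//  5      14      zero padding up to the assumed max index name length (18)
// 19       5      "apple" (column value)
// 24       5      zero padding up to the max value length (10)
// 29       4      "row1" (user row key); offset == byteArray.position() == 29
// The 4-byte cell value packs two shorts: the total key length (33) and the
// user-row offset (29), which lets the scanner recover "row1" from the key.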
Example 5: getIndexDeletes
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the package/class this method depends on
private static Collection<Delete> getIndexDeletes(List<IndexSpecification> indexSpecs,
HRegion userRegion, HRegion indexRegion, KeyValue deleteKV) throws IOException {
Collection<Delete> indexDeletes = new LinkedHashSet<Delete>();
List<IndexSpecification> indicesToUpdate = new LinkedList<IndexSpecification>();
Multimap<Long, KeyValue> groupedKV =
doGetAndGroupByTS(indexSpecs, userRegion, deleteKV, indicesToUpdate);
// There can be multiple index kvs for each user kv
// So, prepare all resultant index delete kvs for this user delete kv
for (Entry<Long, Collection<KeyValue>> entry : groupedKV.asMap().entrySet()) {
for (IndexSpecification index : indicesToUpdate) {
ByteArrayBuilder indexRow =
IndexUtils.getIndexRowKeyHeader(index, indexRegion.getStartKey(), deleteKV.getRow());
boolean update = false;
for (ColumnQualifier cq : index.getIndexColumns()) {
KeyValue kvFound = null;
for (KeyValue kv : entry.getValue()) {
if (Bytes.equals(cq.getColumnFamily(), kv.getFamily())
&& Bytes.equals(cq.getQualifier(), kv.getQualifier())) {
kvFound = kv;
update = true;
break;
}
}
if (kvFound == null) {
indexRow.position(indexRow.position() + cq.getMaxValueLength());
} else {
IndexUtils.updateRowKeyForKV(cq, kvFound, indexRow);
}
}
if (update) {
// Append the actual row key at the end of the index row key.
indexRow.put(deleteKV.getRow());
Delete idxDelete = new Delete(indexRow.array());
if (deleteKV.isDeleteType()) {
idxDelete
.deleteColumn(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, entry.getKey());
} else {
idxDelete.deleteFamily(Constants.IDX_COL_FAMILY, entry.getKey());
}
idxDelete.setWriteToWAL(false);
indexDeletes.add(idxDelete);
}
}
}
return indexDeletes;
}
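This variant differs from Example 3 in two ways: it works with KeyValue directly instead of Cell, and it disables WAL writes for the index delete. The two delete shapes it emits look like this (a sketch; the row key and timestamp are illustrative, the constants come from the surrounding code):
byte[] indexRowKey = Bytes.toBytes("idx-row"); // illustrative index row key
long ts = 1000L;                               // illustrative timestamp
// User delete was a single-version column delete:
Delete columnDelete = new Delete(indexRowKey);
columnDelete.deleteColumn(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, ts);
columnDelete.setWriteToWAL(false); // skip the WAL, as this example does
// Otherwise, drop the whole index column family at that timestamp:
Delete familyDelete = new Delete(indexRowKey);
familyDelete.deleteFamily(Constants.IDX_COL_FAMILY, ts);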
Example 6: testFalsePositiveCases
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the package/class this method depends on
@Test(timeout = 180000)
public void testFalsePositiveCases() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
final Configuration conf = UTIL.getConfiguration();
String userTableName = "testFalsePositiveCases";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
IndexSpecification indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c1" }, "idx2");
ihtd.addIndex(indexSpecification);
indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c2" }, "idx3");
ihtd.addIndex(indexSpecification);
admin.createTable(ihtd);
ZKAssign.blockUntilNoRIT(zkw);
HTable table = new HTable(conf, "testFalsePositiveCases");
HTable idx_table = new HTable(conf, "testFalsePositiveCases_idx");
ByteArrayBuilder byteArray = new ByteArrayBuilder(33);
byteArray.put(new byte[1]);
byteArray.put(Bytes.toBytes("idx2"));
byteArray.put(new byte[14]);
byteArray.put(Bytes.toBytes("apple"));
byteArray.put(new byte[5]);
int offset = byteArray.position();
byteArray.put(Bytes.toBytes("row1"));
ByteArrayBuilder value = new ByteArrayBuilder(4);
value.put(Bytes.toBytes((short) byteArray.array().length));
value.put(Bytes.toBytes((short) offset));
Put p = new Put(byteArray.array());
p.add(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, value.array());
idx_table.put(p);
SingleColumnValueFilter filter =
new SingleColumnValueFilter("cf1".getBytes(), "c1".getBytes(), CompareOp.EQUAL,
"apple".getBytes());
SingleColumnValueFilter filter1 =
new SingleColumnValueFilter("cf1".getBytes(), "c2".getBytes(), CompareOp.EQUAL,
"bat".getBytes());
FilterList masterFilter = new FilterList(Operator.MUST_PASS_ONE);
masterFilter.addFilter(filter1);
masterFilter.addFilter(filter);
Scan scan = new Scan();
scan.setFilter(masterFilter);
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 1, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertEquals("Overall result should have only 2 rows", 0, testRes.size());
}
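To read the fake index entry back, the two shorts packed into the cell value can be decoded with the same Bytes helpers used to write them (a sketch; value is the 4-byte builder from the test above):
byte[] v = value.array();                   // 4 bytes: two packed shorts
short totalKeyLength = Bytes.toShort(v, 0); // 33, the full index row key length
short userRowOffset = Bytes.toShort(v, 2);  // 29, where "row1" starts in the key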