This article collects typical usage examples of the Java class org.apache.hadoop.hbase.index.util.ByteArrayBuilder. If you are wondering what the ByteArrayBuilder class does, how to use it, or where to find usage examples, the curated code samples below should help.
The ByteArrayBuilder class belongs to the org.apache.hadoop.hbase.index.util package. 14 code examples of the class are shown below, sorted by popularity by default.
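Before the examples, here is a minimal sketch of the recurring ByteArrayBuilder pattern they all share: allocate a fixed-size, zero-filled buffer, append chunks with put(), create padding by advancing position() past unwritten bytes, and read the result with array(). Only methods that appear in the examples below are used; the field width and the helper name buildPaddedField are assumptions made purely for illustration.

import org.apache.hadoop.hbase.index.util.ByteArrayBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch of the common usage pattern: a fixed-width, right-padded field.
private static byte[] buildPaddedField(String value, int fieldWidth) {
  ByteArrayBuilder builder = ByteArrayBuilder.allocate(fieldWidth);
  byte[] valueBytes = Bytes.toBytes(value);
  builder.put(valueBytes);
  // Unwritten bytes stay 0, so advancing the position acts as zero padding.
  builder.position(builder.position() + (fieldWidth - valueBytes.length));
  return builder.array();
}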
Example 1: addIndexForTable
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
/**
* @param tableName the table on which the indices are applied
* @param indexList the list of index specifications for that table
*/
public void addIndexForTable(String tableName, List<IndexSpecification> indexList) {
this.tableVsIndices.put(tableName, indexList);
// TODO the inner map needs to be thread safe when we support dynamic index add/remove
Map<byte[], IndexSpecification> indexMap =
new TreeMap<byte[], IndexSpecification>(Bytes.BYTES_COMPARATOR);
for (IndexSpecification index : indexList) {
ByteArrayBuilder keyBuilder = ByteArrayBuilder.allocate(IndexUtils.getMaxIndexNameLength());
keyBuilder.put(Bytes.toBytes(index.getName()));
indexMap.put(keyBuilder.array(), index);
}
this.tableIndexMap.put(tableName, indexMap);
}
Example 2: createRegionScanner
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private void createRegionScanner(HRegion indexRegion, String userTableName,
List<IndexRegionScanner> scanners, ByteArrayBuilder indexNameBuilder, Scan scan,
boolean isRange, int scannerIndex) throws IOException {
RegionScanner scannerForIndexRegion = indexRegion.getScanner(scan);
LeafIndexRegionScanner leafIndexRegionScanner =
new LeafIndexRegionScanner(IndexManager.getInstance().getIndex(userTableName,
indexNameBuilder.array()), scannerForIndexRegion, new TTLExpiryChecker());
leafIndexRegionScanner.setScannerIndex(scannerIndex);
leafIndexRegionScanner.setRangeFlag(isRange);
scanners.add(leafIndexRegionScanner);
}
Example 3: createCommonKeyForIndex
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private byte[] createCommonKeyForIndex(byte[] regionStartKey, byte[] indexName) {
// Format for index table rowkey [Startkey for the index region] + [one 0 byte]+
// [Index name] + [Padding for the max index name] + ....
int commonKeyLength = regionStartKey.length + 1 + IndexUtils.getMaxIndexNameLength();
ByteArrayBuilder builder = ByteArrayBuilder.allocate(commonKeyLength);
// Adding the startkey for the index region and single 0 Byte.
builder.put(regionStartKey);
builder.position(builder.position() + 1);
// Adding the index name and the padding needed
builder.put(indexName);
// No need to add the padding bytes specifically. In the array all the bytes will be 0s.
return builder.array();
}
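To make the key layout from Example 3 concrete, the illustrative variant below uses the same calls with hard-coded sample inputs; the start key "reg1", the index name "idx1", the helper name buildSampleCommonKey, and the import path assumed for IndexUtils are invented for demonstration only.

import org.apache.hadoop.hbase.index.util.ByteArrayBuilder;
import org.apache.hadoop.hbase.index.util.IndexUtils;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper mirroring Example 3 with sample inputs.
private static byte[] buildSampleCommonKey() {
  byte[] regionStartKey = Bytes.toBytes("reg1");
  byte[] indexName = Bytes.toBytes("idx1");
  // Layout: [start key] + [one 0 byte] + [index name] + [zero padding up to the max index name length]
  int commonKeyLength = regionStartKey.length + 1 + IndexUtils.getMaxIndexNameLength();
  ByteArrayBuilder builder = ByteArrayBuilder.allocate(commonKeyLength);
  builder.put(regionStartKey);              // start key bytes
  builder.position(builder.position() + 1); // skip one byte, leaving the 0 separator
  builder.put(indexName);                   // index name; trailing bytes remain 0 as padding
  return builder.array();
}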
Example 4: copyColumnValueToKey
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private void copyColumnValueToKey(ByteArrayBuilder builder, byte[] colValue, int maxValueLength,
ValueType valueType) {
colValue = IndexUtils.changeValueAccToDataType(colValue, valueType);
builder.put(colValue);
int paddingLength = maxValueLength - colValue.length;
builder.position(builder.position() + paddingLength);
}
Example 5: formIndexNameFromKV
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private byte[] formIndexNameFromKV(KeyValue kv) {
byte[] rowKey = kv.getRow();
// Copy the whole index row key into the builder; the index name follows the
// first zero byte in the key, which is located and used for extraction below.
ByteArrayBuilder keyBuilder = ByteArrayBuilder.allocate(rowKey.length);
keyBuilder.put(rowKey, 0, rowKey.length);
String replacedKey = Bytes.toString(keyBuilder.array());
String emptyByte = Bytes.toString(new byte[1]);
int indexOf = replacedKey.indexOf(emptyByte);
return keyBuilder.array(indexOf + 1, IndexUtils.getMaxIndexNameLength());
}
Example 6: addIndexForTable
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
/**
* @param tableName the table on which the indices are applied
* @param indexList the list of index specifications for that table
*/
public void addIndexForTable(String tableName, List<IndexSpecification> indexList) {
this.tableVsIndices.put(tableName, indexList);
// TODO the inner map needs to be thread safe when we support dynamic index add/remove
Map<byte[], IndexSpecification> indexMap =
new TreeMap<byte[], IndexSpecification>(Bytes.BYTES_COMPARATOR);
for (IndexSpecification index : indexList) {
ByteArrayBuilder keyBuilder = ByteArrayBuilder.allocate(IndexUtils.getMaxIndexNameLength());
keyBuilder.put(Bytes.toBytes(index.getName()));
indexMap.put(keyBuilder.array(), index);
}
this.tableIndexMap.put(tableName, indexMap);
}
Example 7: formIndexNameFromKV
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private byte[] formIndexNameFromKV(KeyValue kv) {
byte[] rowKey = kv.getRow();
// Copy the whole index row key into the builder; the index name follows the
// first zero byte, located below with Guava's Bytes.indexOf.
ByteArrayBuilder keyBuilder = ByteArrayBuilder.allocate(rowKey.length);
keyBuilder.put(rowKey, 0, rowKey.length);
int indexOf = com.google.common.primitives.Bytes.indexOf(keyBuilder.array(), new byte[1]);
return keyBuilder.array(indexOf + 1, IndexUtils.getMaxIndexNameLength());
}
Example 8: getIndexDeletes
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private static Collection<Delete> getIndexDeletes(List<IndexSpecification> indexSpecs,
HRegion userRegion, HRegion indexRegion, Cell deleteKV) throws IOException {
Collection<Delete> indexDeletes = new LinkedHashSet<Delete>();
List<IndexSpecification> indicesToUpdate = new LinkedList<IndexSpecification>();
Multimap<Long, Cell> groupedKV =
doGetAndGroupByTS(indexSpecs, userRegion, deleteKV, indicesToUpdate);
// There can be multiple index kvs for each user kv
// So, prepare all resultant index delete kvs for this user delete kv
for (Entry<Long, Collection<Cell>> entry : groupedKV.asMap().entrySet()) {
for (IndexSpecification index : indicesToUpdate) {
ByteArrayBuilder indexRow =
IndexUtils.getIndexRowKeyHeader(index, indexRegion.getStartKey(), deleteKV.getRow());
boolean update = false;
for (ColumnQualifier cq : index.getIndexColumns()) {
Cell kvFound = null;
for (Cell kv : entry.getValue()) {
if (Bytes.equals(cq.getColumnFamily(), kv.getFamily())
&& Bytes.equals(cq.getQualifier(), kv.getQualifier())) {
kvFound = kv;
update = true;
break;
}
}
if (kvFound == null) {
indexRow.position(indexRow.position() + cq.getMaxValueLength());
} else {
IndexUtils.updateRowKeyForKV(cq, kvFound, indexRow);
}
}
if (update) {
// Append the actual row key at the end of the index row key.
indexRow.put(deleteKV.getRow());
Delete idxDelete = new Delete(indexRow.array());
if (((KeyValue) deleteKV).isDeleteType()) {
idxDelete
.deleteColumn(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, entry.getKey());
} else {
idxDelete.deleteFamily(Constants.IDX_COL_FAMILY, entry.getKey());
}
indexDeletes.add(idxDelete);
}
}
}
return indexDeletes;
}
Example 9: testFalsePositiveCases
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
@Test(timeout = 180000)
public void testFalsePositiveCases() throws Exception {
final Configuration conf = UTIL.getConfiguration();
String userTableName = "testFalsePositiveCases";
HTableDescriptor ihtd = new HTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
TableIndices indices = new TableIndices();
IndexSpecification indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c1" }, "idx2");
indices.addIndex(indexSpecification);
indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c2" }, "idx3");
indices.addIndex(indexSpecification);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
admin.createTable(ihtd);
HTable table = new HTable(conf, "testFalsePositiveCases");
HTable idx_table = new HTable(conf, "testFalsePositiveCases_idx");
ByteArrayBuilder byteArray = new ByteArrayBuilder(33);
byteArray.put(new byte[1]);
byteArray.put(Bytes.toBytes("idx2"));
byteArray.put(new byte[14]);
byteArray.put(Bytes.toBytes("apple"));
byteArray.put(new byte[5]);
int offset = byteArray.position();
byteArray.put(Bytes.toBytes("row1"));
ByteArrayBuilder value = new ByteArrayBuilder(4);
value.put(Bytes.toBytes((short) byteArray.array().length));
value.put(Bytes.toBytes((short) offset));
Put p = new Put(byteArray.array());
p.add(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, value.array());
idx_table.put(p);
SingleColumnValueFilter filter =
new SingleColumnValueFilter("cf1".getBytes(), "c1".getBytes(), CompareOp.EQUAL,
"apple".getBytes());
SingleColumnValueFilter filter1 =
new SingleColumnValueFilter("cf1".getBytes(), "c2".getBytes(), CompareOp.EQUAL,
"bat".getBytes());
FilterList masterFilter = new FilterList(Operator.MUST_PASS_ONE);
masterFilter.addFilter(filter1);
masterFilter.addFilter(filter);
Scan scan = new Scan();
scan.setFilter(masterFilter);
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
assertEquals("It should get two seek points from index scanner.", 1, IndexRegionObserver
.getMultipleSeekPoints().size());
assertEquals("Overall result should have only 2 rows", 0, testRes.size());
}
Example 10: genSomeKeys
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private List<KeyValue> genSomeKeys(String userTableName) throws Exception {
List<KeyValue> ret = new ArrayList<KeyValue>(4);
HTableDescriptor ihtd = new HTableDescriptor(TableName.valueOf(userTableName));
HColumnDescriptor hcd1 = new HColumnDescriptor("column1");
HColumnDescriptor hcd2 = new HColumnDescriptor("column2");
IndexSpecification iSpec1 = new IndexSpecification("Index");
iSpec1.addIndexColumn(hcd1, "q", ValueType.String, 10);
iSpec1.addIndexColumn(hcd2, "q", ValueType.String, 10);
ihtd.addFamily(hcd1);
ihtd.addFamily(hcd2);
TableIndices indices = new TableIndices();
indices.addIndex(iSpec1);
ihtd.setValue(Constants.INDEX_SPEC_KEY, indices.toByteArray());
admin.createTable(ihtd);
ByteArrayBuilder indexColVal = ByteArrayBuilder.allocate(4);
indexColVal.put(Bytes.toBytes((short) 3));
indexColVal.put(Bytes.toBytes((short) 32));
Put p1 = generatePuts("006".getBytes(), "05".getBytes());
Put p2 = generatePuts("003".getBytes(), "06".getBytes());
Put p3 = generatePuts("004".getBytes(), "06".getBytes());
Put p4 = generatePuts("007".getBytes(), "06".getBytes());
byte[] seekToPut =
new byte[3 + 1 + IndexUtils.getMaxIndexNameLength() + 10 + "006".getBytes().length];
System.arraycopy(p1.getRow(), 0, seekToPut, 0, p1.getRow().length);
byte[] seekToRow = "007".getBytes();
System.arraycopy(seekToRow, 0, seekToPut, p1.getRow().length - 3, seekToRow.length);
System.arraycopy("005".getBytes(), 0, seekToPut, 0, 3);
setSeekToRowKey(seekToPut, indexColVal);
byte[] expectedPut =
new byte[3 + 1 + IndexUtils.getMaxIndexNameLength() + 10 + "006".getBytes().length];
System.arraycopy(p4.getRow(), 0, expectedPut, 0, p4.getRow().length);
// Copy the first 3 bytes as the splitKey, since getKeyValue will replace the start key with the splitKey.
// This is added just for the assertion.
System.arraycopy("005".getBytes(), 0, expectedPut, 0, 3);
setExpected(expectedPut);
KeyValue kv =
new KeyValue(p1.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv);
KeyValue kv1 =
new KeyValue(p2.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv1);
KeyValue kv2 =
new KeyValue(p3.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv2);
KeyValue kv3 =
new KeyValue(p4.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv3);
return ret;
}
Example 11: setSeekToRowKey
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private void setSeekToRowKey(byte[] seekTorowKey3, ByteArrayBuilder indexColVal) {
KeyValue kv =
new KeyValue(seekTorowKey3, Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
this.seekToKeyVal = kv;
}
Example 12: getIndexDeletes
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private static Collection<Delete> getIndexDeletes(List<IndexSpecification> indexSpecs,
HRegion userRegion, HRegion indexRegion, KeyValue deleteKV) throws IOException {
Collection<Delete> indexDeletes = new LinkedHashSet<Delete>();
List<IndexSpecification> indicesToUpdate = new LinkedList<IndexSpecification>();
Multimap<Long, KeyValue> groupedKV =
doGetAndGroupByTS(indexSpecs, userRegion, deleteKV, indicesToUpdate);
// There can be multiple index kvs for each user kv
// So, prepare all resultant index delete kvs for this user delete kv
for (Entry<Long, Collection<KeyValue>> entry : groupedKV.asMap().entrySet()) {
for (IndexSpecification index : indicesToUpdate) {
ByteArrayBuilder indexRow =
IndexUtils.getIndexRowKeyHeader(index, indexRegion.getStartKey(), deleteKV.getRow());
boolean update = false;
for (ColumnQualifier cq : index.getIndexColumns()) {
KeyValue kvFound = null;
for (KeyValue kv : entry.getValue()) {
if (Bytes.equals(cq.getColumnFamily(), kv.getFamily())
&& Bytes.equals(cq.getQualifier(), kv.getQualifier())) {
kvFound = kv;
update = true;
break;
}
}
if (kvFound == null) {
indexRow.position(indexRow.position() + cq.getMaxValueLength());
} else {
IndexUtils.updateRowKeyForKV(cq, kvFound, indexRow);
}
}
if (update) {
// Append the actual row key at the end of the index row key.
indexRow.put(deleteKV.getRow());
Delete idxDelete = new Delete(indexRow.array());
if (deleteKV.isDeleteType()) {
idxDelete
.deleteColumn(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, entry.getKey());
} else {
idxDelete.deleteFamily(Constants.IDX_COL_FAMILY, entry.getKey());
}
idxDelete.setWriteToWAL(false);
indexDeletes.add(idxDelete);
}
}
}
return indexDeletes;
}
Example 13: testFalsePositiveCases
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
@Test(timeout = 180000)
public void testFalsePositiveCases() throws Exception {
HBaseAdmin admin = UTIL.getHBaseAdmin();
ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
final Configuration conf = UTIL.getConfiguration();
String userTableName = "testFalsePositiveCases";
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd = new HColumnDescriptor("cf1");
ihtd.addFamily(hcd);
IndexSpecification indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c1" }, "idx2");
ihtd.addIndex(indexSpecification);
indexSpecification =
createIndexSpecification(hcd, ValueType.String, 10, new String[] { "c2" }, "idx3");
ihtd.addIndex(indexSpecification);
admin.createTable(ihtd);
ZKAssign.blockUntilNoRIT(zkw);
HTable table = new HTable(conf, "testFalsePositiveCases");
HTable idx_table = new HTable(conf, "testFalsePositiveCases_idx");
ByteArrayBuilder byteArray = new ByteArrayBuilder(33);
byteArray.put(new byte[1]);
byteArray.put(Bytes.toBytes("idx2"));
byteArray.put(new byte[14]);
byteArray.put(Bytes.toBytes("apple"));
byteArray.put(new byte[5]);
int offset = byteArray.position();
byteArray.put(Bytes.toBytes("row1"));
ByteArrayBuilder value = new ByteArrayBuilder(4);
value.put(Bytes.toBytes((short) byteArray.array().length));
value.put(Bytes.toBytes((short) offset));
Put p = new Put(byteArray.array());
p.add(Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, value.array());
idx_table.put(p);
SingleColumnValueFilter filter =
new SingleColumnValueFilter("cf1".getBytes(), "c1".getBytes(), CompareOp.EQUAL,
"apple".getBytes());
SingleColumnValueFilter filter1 =
new SingleColumnValueFilter("cf1".getBytes(), "c2".getBytes(), CompareOp.EQUAL,
"bat".getBytes());
FilterList masterFilter = new FilterList(Operator.MUST_PASS_ONE);
masterFilter.addFilter(filter1);
masterFilter.addFilter(filter);
Scan scan = new Scan();
scan.setFilter(masterFilter);
int i = 0;
ResultScanner scanner = table.getScanner(scan);
List<Result> testRes = new ArrayList<Result>();
Result[] result = scanner.next(1);
while (result != null && result.length > 0) {
testRes.add(result[0]);
i++;
result = scanner.next(1);
}
Assert.assertTrue("Index flow should get used.", IndexRegionObserver.getIndexedFlowUsed());
Assert.assertTrue("Seekpoints should get added by index scanner",
IndexRegionObserver.getSeekpointAdded());
Assert.assertEquals("It should get two seek points from index scanner.", 1, IndexRegionObserver
.getMultipleSeekPoints().size());
Assert.assertEquals("Overall result should have only 2 rows", 0, testRes.size());
}
Example 14: genSomeKeys
import org.apache.hadoop.hbase.index.util.ByteArrayBuilder; // import the required package/class
private List<KeyValue> genSomeKeys(String userTableName) throws Exception {
List<KeyValue> ret = new ArrayList<KeyValue>(4);
HBaseAdmin admin = UTIL.getHBaseAdmin();
ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(UTIL);
IndexedHTableDescriptor ihtd = new IndexedHTableDescriptor(userTableName);
HColumnDescriptor hcd1 = new HColumnDescriptor("column1");
HColumnDescriptor hcd2 = new HColumnDescriptor("column2");
IndexSpecification iSpec1 = new IndexSpecification("Index");
iSpec1.addIndexColumn(hcd1, "q", ValueType.String, 10);
iSpec1.addIndexColumn(hcd2, "q", ValueType.String, 10);
ihtd.addFamily(hcd1);
ihtd.addFamily(hcd2);
ihtd.addIndex(iSpec1);
admin.createTable(ihtd);
ZKAssign.blockUntilNoRIT(zkw);
ByteArrayBuilder indexColVal = ByteArrayBuilder.allocate(4);
indexColVal.put(Bytes.toBytes((short) 3));
indexColVal.put(Bytes.toBytes((short) 32));
Put p1 = generatePuts("006".getBytes(), "05".getBytes());
Put p2 = generatePuts("003".getBytes(), "06".getBytes());
Put p3 = generatePuts("004".getBytes(), "06".getBytes());
Put p4 = generatePuts("007".getBytes(), "06".getBytes());
byte[] seekToPut =
new byte[3 + 1 + IndexUtils.getMaxIndexNameLength() + 10 + "006".getBytes().length];
System.arraycopy(p1.getRow(), 0, seekToPut, 0, p1.getRow().length);
byte[] seekToRow = "007".getBytes();
System.arraycopy(seekToRow, 0, seekToPut, p1.getRow().length - 3, seekToRow.length);
System.arraycopy("005".getBytes(), 0, seekToPut, 0, 3);
setSeekToRowKey(seekToPut, indexColVal);
byte[] expectedPut =
new byte[3 + 1 + IndexUtils.getMaxIndexNameLength() + 10 + "006".getBytes().length];
System.arraycopy(p4.getRow(), 0, expectedPut, 0, p4.getRow().length);
// Copy the first 3 bytes as the splitKey, since getKeyValue will replace the start key with the splitKey.
// This is added just for the assertion.
System.arraycopy("005".getBytes(), 0, expectedPut, 0, 3);
setExpected(expectedPut);
KeyValue kv =
new KeyValue(p1.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv);
KeyValue kv1 =
new KeyValue(p2.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv1);
KeyValue kv2 =
new KeyValue(p3.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv2);
KeyValue kv3 =
new KeyValue(p4.getRow(), Constants.IDX_COL_FAMILY, Constants.IDX_COL_QUAL, 0,
indexColVal.array());
ret.add(kv3);
return ret;
}