This article collects typical usage examples of the Java class org.apache.hadoop.hbase.KeyValue. If you have been wondering what the KeyValue class is for or how to use it, the curated class examples below should help.
The KeyValue class belongs to the org.apache.hadoop.hbase package. A total of 15 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
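For orientation, here is a minimal sketch of constructing a KeyValue and reading it back through the Cell interface, assuming an HBase 1.x-style client on the classpath; the row, family, and qualifier literals are made up for illustration.
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyValueIntroSketch {
  public static void main(String[] args) {
    // A KeyValue bundles row, family, qualifier, timestamp, type and value into a single cell.
    KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("f1"),
        Bytes.toBytes("q1"), Bytes.toBytes("val"));
    // KeyValue implements Cell, so the CellUtil helpers can read its components back.
    Cell cell = kv;
    System.out.println(Bytes.toString(CellUtil.cloneRow(cell)) + " "
        + Bytes.toString(CellUtil.cloneFamily(cell)) + ":"
        + Bytes.toString(CellUtil.cloneQualifier(cell)) + " = "
        + Bytes.toString(CellUtil.cloneValue(cell)));
  }
}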
Example 1: testSeekToBlockWithDecreasingCommonPrefix
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * Test seeking while file is encoded.
 */
@Test
public void testSeekToBlockWithDecreasingCommonPrefix() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<KeyValue>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q1"), Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q2"), Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv3 = new KeyValue(Bytes.toBytes("row10aaa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q3"), Bytes.toBytes("val"));
  sampleKv.add(kv3);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("row11baa"), Bytes.toBytes("f1"),
      Bytes.toBytes("q1"), Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue toSeek = KeyValueUtil.createLastOnRow(kv3.getRowArray(), kv3.getRowOffset(),
      kv3.getRowLength(), null, 0, 0, null, 0, 0);
  seekToTheKey(kv3, sampleKv, toSeek);
}
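The seek key here is built with KeyValueUtil.createLastOnRow, which returns a synthetic KeyValue that sorts after every real cell of the given row, so seeking to it lands on kv3, the last cell stored for row "row10aaa". A minimal sketch of the same call with a plain byte[] row (purely illustrative) looks like this:
// createLastOnRow builds a fake KeyValue that sorts after every real cell on that row,
// so a seek to it positions the scanner on the row's last stored cell.
byte[] row = Bytes.toBytes("row10aaa");
KeyValue lastOnRow = KeyValueUtil.createLastOnRow(row, 0, (short) row.length,
    null, 0, 0, null, 0, 0);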
Example 2: deleteAllTs
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Override
public void deleteAllTs(ByteBuffer tableName,
    ByteBuffer row,
    ByteBuffer column,
    long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
  Table table = null;
  try {
    table = getTable(tableName);
    Delete delete = new Delete(getBytes(row));
    addAttributes(delete, attributes);
    byte[][] famAndQf = KeyValue.parseColumn(getBytes(column));
    if (famAndQf.length == 1) {
      delete.deleteFamily(famAndQf[0], timestamp);
    } else {
      delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
    }
    table.delete(delete);
  } catch (IOException e) {
    LOG.warn(e.getMessage(), e);
    throw new IOError(Throwables.getStackTraceAsString(e));
  } finally {
    closeTable(table);
  }
}
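The branch on famAndQf.length relies on KeyValue.parseColumn splitting a column spec of the form family:qualifier at the colon delimiter; with no colon it returns only the family. A small hedged sketch of that behavior (the literals are illustrative):
// KeyValue.parseColumn splits a "family:qualifier" spec at the colon.
byte[][] famOnly = KeyValue.parseColumn(Bytes.toBytes("cf"));        // length 1: {"cf"}
byte[][] famAndQual = KeyValue.parseColumn(Bytes.toBytes("cf:col")); // length 2: {"cf", "col"}
// A spec with an empty qualifier ("cf:") should still yield two elements (the second empty),
// so only a bare family name takes the deleteFamily branch above.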
Example 3: getAllRecord
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * show data
 */
public static void getAllRecord(String tableName) {
  try {
    Table table = connection.getTable(TableName.valueOf(tableName));
    Scan s = new Scan();
    ResultScanner ss = table.getScanner(s);
    for (Result r : ss) {
      for (KeyValue kv : r.raw()) {
        System.out.print(new String(kv.getRow()) + " ");
        System.out.print(new String(kv.getFamily()) + ":");
        System.out.print(new String(kv.getQualifier()) + " ");
        System.out.print(kv.getTimestamp() + " ");
        System.out.println(new String(kv.getValue()));
      }
    }
  } catch (IOException e) {
    e.printStackTrace();
  }
}
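The per-cell getters used here (getRow, getFamily, getQualifier, getValue) and Result.raw() are deprecated in later HBase releases in favour of the Cell interface. A roughly equivalent loop body using CellUtil, as a sketch rather than a drop-in from the original project, would be:
for (Result r : ss) {
  for (Cell cell : r.rawCells()) {
    // CellUtil copies out each component of the cell as a byte[].
    System.out.print(Bytes.toString(CellUtil.cloneRow(cell)) + " ");
    System.out.print(Bytes.toString(CellUtil.cloneFamily(cell)) + ":");
    System.out.print(Bytes.toString(CellUtil.cloneQualifier(cell)) + " ");
    System.out.print(cell.getTimestamp() + " ");
    System.out.println(Bytes.toString(CellUtil.cloneValue(cell)));
  }
}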
Example 4: fromFilter
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
public static Range[] fromFilter(SingleColumnValueFilter filter) {
  if (!(filter.getComparator() instanceof BinaryComparator)) {
    return new Range[0];
  }
  byte[] column = KeyValue.makeColumn(filter.getFamily(), filter.getQualifier());
  CompareOp compareOp = filter.getOperator();
  byte[] value = filter.getComparator().getValue();
  if (compareOp == CompareOp.NOT_EQUAL) {
    return new Range[] { new Range(column, null, CompareOp.NO_OP, value, CompareOp.LESS),
        new Range(column, value, CompareOp.GREATER, null, CompareOp.NO_OP) };
  } else {
    switch (compareOp) {
      case EQUAL:
      case GREATER_OR_EQUAL:
      case GREATER:
        return new Range[] { new Range(column, value, compareOp, null, CompareOp.NO_OP) };
      case LESS:
      case LESS_OR_EQUAL:
        return new Range[] { new Range(column, null, CompareOp.NO_OP, value, compareOp) };
      default:
        return new Range[0];
    }
  }
}
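To see how a filter decomposes into ranges, here is a short hedged usage sketch; Range and fromFilter are the helper types from this example, and the family, qualifier, and value literals are made up:
// NOT_EQUAL decomposes into two open-ended ranges: (-inf, value) and (value, +inf).
SingleColumnValueFilter filter = new SingleColumnValueFilter(Bytes.toBytes("f"),
    Bytes.toBytes("q"), CompareOp.NOT_EQUAL, new BinaryComparator(Bytes.toBytes("v")));
Range[] ranges = fromFilter(filter); // expected: two Range objects for NOT_EQUAL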
Example 5: getAllRecord
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * Gets all records of a row from an existing HBase table.
 * @method getAllRecord
 * @inputParameters myHbaseBtableName the HBase table name to read from
 * @return none; this is a void method
 */
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    if (hbaseBSs != null) hbaseBSs.close();
    // closing the scanner on hbaseBtable
  }
}
Example 6: testSeekToBlockWithDiffFamilyAndQualifer
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Test
public void testSeekToBlockWithDiffFamilyAndQualifer() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<KeyValue>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("aab"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam1"), Bytes.toBytes("q1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue kv5 = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam1"), Bytes.toBytes("q2"),
      Bytes.toBytes("val"));
  sampleKv.add(kv5);
  KeyValue toSeek = new KeyValue(Bytes.toBytes("aac"), Bytes.toBytes("fam2"),
      Bytes.toBytes("q2"), Bytes.toBytes("val"));
  seekToTheKey(kv5, sampleKv, toSeek);
}
Example 7: testSeekToBlockWithDiffQualiferOnSameRowButDescendingInSize
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Test
public void testSeekToBlockWithDiffQualiferOnSameRowButDescendingInSize() throws IOException {
  List<KeyValue> sampleKv = new ArrayList<KeyValue>();
  KeyValue kv1 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual1"),
      Bytes.toBytes("val"));
  sampleKv.add(kv1);
  KeyValue kv2 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual2"),
      Bytes.toBytes("val"));
  sampleKv.add(kv2);
  KeyValue kv4 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual3"),
      Bytes.toBytes("val"));
  sampleKv.add(kv4);
  KeyValue kv5 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qual4"),
      Bytes.toBytes("val"));
  sampleKv.add(kv5);
  KeyValue kv6 = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qz"),
      Bytes.toBytes("val"));
  sampleKv.add(kv6);
  KeyValue toSeek = new KeyValue(Bytes.toBytes("aaa"), Bytes.toBytes("f1"), Bytes.toBytes("qz"),
      Bytes.toBytes("val"));
  seekToTheKey(kv6, sampleKv, toSeek);
}
Example 8: recoverClusteringResult
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
public static List<Cell> recoverClusteringResult(List<Cell> cells, byte[] family,
    byte[] qualifier) {
  if (cells == null || cells.size() == 0) return cells;
  byte[][] indexColumn = IndexPutParser.parseIndexRowKey(cells.get(0).getRow());
  List<Cell> list = new ArrayList<>(cells.size() + 1);
  for (Cell cell : cells) {
    byte[] tag = cell.getTagsArray();
    if (tag != null && tag.length > KeyValue.MAX_TAGS_LENGTH) tag = null;
    KeyValue kv =
        new KeyValue(indexColumn[0], CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell),
            cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
            CellUtil.cloneValue(cell), tag);
    list.add(kv);
  }
  list.add(new KeyValue(indexColumn[0], family, qualifier, indexColumn[1]));
  Collections.sort(list, KeyValue.COMPARATOR);
  return list;
}
Example 9: testMixedPutDelete
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * Insert a mix of puts and deletes
 * @throws Exception
 */
@Test
public void testMixedPutDelete() throws Exception {
  List<WALEntry> entries = new ArrayList<WALEntry>(BATCH_SIZE / 2);
  List<Cell> cells = new ArrayList<Cell>();
  for (int i = 0; i < BATCH_SIZE / 2; i++) {
    entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells));
  entries = new ArrayList<WALEntry>(BATCH_SIZE);
  cells = new ArrayList<Cell>();
  for (int i = 0; i < BATCH_SIZE; i++) {
    entries.add(createEntry(TABLE_NAME1, i,
        i % 2 != 0 ? KeyValue.Type.Put : KeyValue.Type.DeleteColumn, cells));
  }
  SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator()));
  Scan scan = new Scan();
  ResultScanner scanRes = table1.getScanner(scan);
  assertEquals(BATCH_SIZE / 2, scanRes.next(BATCH_SIZE).length);
}
Example 10: insertData
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
private static int insertData(TableName tableName, String column, double prob) throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
        + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
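The visibility expression above makes each cell visible to a user holding SECRET or CONFIDENTIAL but not TOPSECRET; reading the data back requires passing Authorizations on the scan. A hedged sketch, assuming SECRET and the other constants are plain label strings defined elsewhere in the test:
// Only scans carrying a matching authorization label will return these cells.
Scan scan = new Scan();
scan.setAuthorizations(new Authorizations(SECRET));
try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    ResultScanner scanner = table.getScanner(scan)) {
  for (Result result : scanner) {
    System.out.println(result);
  }
}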
Example 11: createBlockOnDisk
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
private HFileBlock createBlockOnDisk(List<KeyValue> kvs, HFileBlock block, boolean useTags)
    throws IOException {
  int size;
  HFileBlockEncodingContext context = new HFileBlockDefaultEncodingContext(
      blockEncoder.getDataBlockEncoding(), HConstants.HFILEBLOCK_DUMMY_HEADER,
      block.getHFileContext());
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  baos.write(block.getDummyHeaderForVersion());
  DataOutputStream dos = new DataOutputStream(baos);
  blockEncoder.startBlockEncoding(context, dos);
  for (KeyValue kv : kvs) {
    blockEncoder.encode(kv, context, dos);
  }
  BufferGrabbingByteArrayOutputStream stream = new BufferGrabbingByteArrayOutputStream();
  baos.writeTo(stream);
  blockEncoder.endBlockEncoding(context, dos, stream.getBuffer(), BlockType.DATA);
  byte[] encodedBytes = baos.toByteArray();
  size = encodedBytes.length - block.getDummyHeaderForVersion().length;
  return new HFileBlock(context.getBlockType(), size, size, -1, ByteBuffer.wrap(encodedBytes),
      HFileBlock.FILL_HEADER, 0, block.getOnDiskDataSizeWithHeader(), block.getHFileContext());
}
Example 12: testCreateKey
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Test
public void testCreateKey() {
  CompoundBloomFilterBase cbfb = new CompoundBloomFilterBase();
  byte[] row = "myRow".getBytes();
  byte[] qualifier = "myQualifier".getBytes();
  byte[] rowKey = cbfb.createBloomKey(row, 0, row.length,
      row, 0, 0);
  byte[] rowColKey = cbfb.createBloomKey(row, 0, row.length,
      qualifier, 0, qualifier.length);
  KeyValue rowKV = KeyValue.createKeyValueFromKey(rowKey);
  KeyValue rowColKV = KeyValue.createKeyValueFromKey(rowColKey);
  assertEquals(rowKV.getTimestamp(), rowColKV.getTimestamp());
  assertEquals(Bytes.toStringBinary(rowKV.getRow()),
      Bytes.toStringBinary(rowColKV.getRow()));
  assertEquals(0, rowKV.getQualifier().length);
}
Example 13: writeStoreFile
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
  byte[] fam = Bytes.toBytes("f");
  byte[] qualifier = Bytes.toBytes("q");
  long now = System.currentTimeMillis();
  byte[] b = Bytes.toBytes("k1");
  Tag t1 = new Tag((byte) 1, "tag1");
  Tag t2 = new Tag((byte) 2, "tag2");
  Tag t3 = new Tag((byte) 3, "tag3");
  try {
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t1 }));
    b = Bytes.toBytes("k3");
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t2, t1 }));
    b = Bytes.toBytes("k4");
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t3 }));
    b = Bytes.toBytes("k5");
    writer.append(new KeyValue(b, fam, qualifier, now, b, new Tag[] { t3 }));
  } finally {
    writer.close();
  }
}
Example 14: GetClosestRowBeforeTracker
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
/**
 * @param c
 * @param kv Presume first on row: i.e. empty column, maximum timestamp and
 *   a type of Type.Maximum
 * @param ttl Time to live in ms for this Store
 * @param metaregion True if this is hbase:meta or -ROOT- region.
 */
GetClosestRowBeforeTracker(final KVComparator c, final KeyValue kv,
    final long ttl, final boolean metaregion) {
  super();
  this.metaregion = metaregion;
  this.targetkey = kv;
  // If we are in a metaregion, then our table name is the prefix on the
  // targetkey.
  this.rowoffset = kv.getRowOffset();
  int l = -1;
  if (metaregion) {
    l = KeyValue.getDelimiter(kv.getRowArray(), rowoffset, kv.getRowLength(),
        HConstants.DELIMITER) - this.rowoffset;
  }
  this.tablenamePlusDelimiterLength = metaregion ? l + 1 : -1;
  this.now = System.currentTimeMillis();
  this.oldestUnexpiredTs = now - ttl;
  this.kvcomparator = c;
  KeyValue.RowOnlyComparator rc = new KeyValue.RowOnlyComparator(this.kvcomparator);
  this.deletes = new TreeMap<KeyValue, NavigableSet<KeyValue>>(rc);
}
Example 15: write
import org.apache.hadoop.hbase.KeyValue; // import the required package/class
@Override
public void write(DataOutput out) throws IOException {
  LOG.warn("WALEdit is being serialized to writable - only expected in test code");
  out.writeInt(VERSION_2);
  out.writeInt(cells.size());
  // We interleave the two lists for code simplicity
  for (Cell cell : cells) {
    // This is not used in any of the core code flows so it is just fine to convert to KV
    KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
    if (compressionContext != null) {
      KeyValueCompression.writeKV(out, kv, compressionContext);
    } else {
      KeyValue.write(kv, out);
    }
  }
  if (scopes == null) {
    out.writeInt(0);
  } else {
    out.writeInt(scopes.size());
    for (byte[] key : scopes.keySet()) {
      Bytes.writeByteArray(out, key);
      out.writeInt(scopes.get(key));
    }
  }
}