This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.getValueLength. If you are wondering what exactly KeyValue.getValueLength does, how to call it, or what real-world usages look like, the curated method examples below may help. You can also read further about the enclosing class, org.apache.hadoop.hbase.KeyValue.
The following shows 6 code examples of the KeyValue.getValueLength method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
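Before the examples, here is a minimal, self-contained sketch (not taken from the examples below; it assumes the classic HBase 0.98/1.x client API where KeyValue is public) showing what the method returns: the length in bytes of the value portion of a cell, independent of the row, family, and qualifier sizes.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class GetValueLengthSketch {
  public static void main(String[] args) {
    // Build a standalone KeyValue; only the last argument is the value.
    KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("value-bytes"));
    int vLen = kv.getValueLength(); // 11, the byte length of "value-bytes"
    System.out.println("value length = " + vLen);
  }
}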
Example 1: prePut
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
@Override
public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
    final WALEdit edit, final Durability durability) throws IOException {
  byte[] attribute = put.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : put.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag(TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);
        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        ((List<Cell>) updatedCells).add(newKV);
      }
    }
    put.getFamilyCellMap().remove(cf);
    // Update the family map
    put.getFamilyCellMap().put(cf, updatedCells);
  }
}
Example 2: prePut
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
@Override
public void prePut(ObserverContext<RegionCoprocessorEnvironment> e, Put m, WALEdit edit,
    Durability durability) throws IOException {
  byte[] attribute = m.getAttribute(NON_VISIBILITY);
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag((byte) NON_VIS_TAG_TYPE, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);
        tagList.addAll(kv.getTags());
        byte[] fromList = Tag.fromList(tagList);
        TagRewriteCell newcell = new TagRewriteCell(kv, fromList);
        // newKV rebuilds the cell with the tag list (using getValueLength()),
        // but it is not used further; newcell is what gets added below.
        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        ((List<Cell>) updatedCells).add(newcell);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
Example 3: updateMutationAddingTags
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
private void updateMutationAddingTags(final Mutation m) {
  byte[] attribute = m.getAttribute("visibility");
  byte[] cf = null;
  List<Cell> updatedCells = new ArrayList<Cell>();
  if (attribute != null) {
    for (List<? extends Cell> edits : m.getFamilyCellMap().values()) {
      for (Cell cell : edits) {
        KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
        if (cf == null) {
          cf = kv.getFamily();
        }
        Tag tag = new Tag((byte) 1, attribute);
        List<Tag> tagList = new ArrayList<Tag>();
        tagList.add(tag);
        KeyValue newKV = new KeyValue(kv.getRow(), 0, kv.getRowLength(), kv.getFamily(), 0,
            kv.getFamilyLength(), kv.getQualifier(), 0, kv.getQualifierLength(),
            kv.getTimestamp(), KeyValue.Type.codeToType(kv.getType()), kv.getValue(), 0,
            kv.getValueLength(), tagList);
        ((List<Cell>) updatedCells).add(newKV);
      }
    }
    m.getFamilyCellMap().remove(cf);
    // Update the family map
    m.getFamilyCellMap().put(cf, updatedCells);
  }
}
Example 4: IndexKeyValue
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
public IndexKeyValue(KeyValue kv) {
  this(kv.getBuffer(), kv.getRowOffset(), kv.getRowLength(), kv.getBuffer(),
      kv.getQualifierOffset(), kv.getQualifierLength(), kv.getBuffer(), kv.getValueOffset(),
      kv.getValueLength());
}
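Example 4 above forwards (buffer, offset, length) triples to another constructor. The same pattern can copy just the value bytes out of a KeyValue; the helper below is an illustrative sketch (the name copyValue is not from the example) built on getBuffer()/getValueOffset()/getValueLength():

// Illustrative helper: extract only the value bytes of a KeyValue.
private static byte[] copyValue(KeyValue kv) {
  byte[] value = new byte[kv.getValueLength()];
  System.arraycopy(kv.getBuffer(), kv.getValueOffset(), value, 0, kv.getValueLength());
  return value;
}

In newer HBase versions, where getBuffer() is deprecated, CellUtil.cloneValue(cell) performs the same copy.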
Example 5: checkStatistics
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
/**
 * Check statistics for given HFile for different data block encoders.
 * @param scanner Of file which will be compressed.
 * @param kvLimit Maximal count of KeyValue which will be processed.
 * @throws IOException thrown if scanner is invalid
 */
public void checkStatistics(final KeyValueScanner scanner, final int kvLimit)
    throws IOException {
  scanner.seek(KeyValue.LOWESTKEY);
  KeyValue currentKV;
  byte[] previousKey = null;
  byte[] currentKey;
  DataBlockEncoding[] encodings = DataBlockEncoding.values();
  ByteArrayOutputStream uncompressedOutputStream =
      new ByteArrayOutputStream();
  int j = 0;
  while ((currentKV = KeyValueUtil.ensureKeyValue(scanner.next())) != null && j < kvLimit) {
    // Iterates through key/value pairs
    j++;
    currentKey = currentKV.getKey();
    if (previousKey != null) {
      for (int i = 0; i < previousKey.length && i < currentKey.length &&
          previousKey[i] == currentKey[i]; ++i) {
        totalKeyRedundancyLength++;
      }
    }
    uncompressedOutputStream.write(currentKV.getBuffer(),
        currentKV.getOffset(), currentKV.getLength());
    previousKey = currentKey;
    int kLen = currentKV.getKeyLength();
    int vLen = currentKV.getValueLength();
    int cfLen = currentKV.getFamilyLength(currentKV.getFamilyOffset());
    int restLen = currentKV.getLength() - kLen - vLen;
    totalKeyLength += kLen;
    totalValueLength += vLen;
    totalPrefixLength += restLen;
    totalCFLength += cfLen;
  }
  rawKVs = uncompressedOutputStream.toByteArray();
  boolean useTag = (currentKV.getTagsLength() > 0);
  for (DataBlockEncoding encoding : encodings) {
    if (encoding == DataBlockEncoding.NONE) {
      continue;
    }
    DataBlockEncoder d = encoding.getEncoder();
    HFileContext meta = new HFileContextBuilder()
        .withCompression(Compression.Algorithm.NONE)
        .withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(useTag).build();
    codecs.add(new EncodedDataBlock(d, encoding, rawKVs, meta));
  }
}
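In Example 5, getValueLength() feeds the per-cell size breakdown: restLen is whatever remains of the serialized KeyValue after the key and value portions are subtracted (the two 4-byte length prefixes, plus tag bytes if present). A minimal standalone sketch of that accounting (variable names mirror the example):

KeyValue kv = new KeyValue(Bytes.toBytes("r"), Bytes.toBytes("cf"),
    Bytes.toBytes("q"), Bytes.toBytes("v"));
int kLen = kv.getKeyLength();               // full key portion: row, family, qualifier, timestamp, type
int vLen = kv.getValueLength();             // value bytes only
int restLen = kv.getLength() - kLen - vLen; // length prefixes (8 bytes here, since there are no tags)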
Example 6: createTFile
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
private void createTFile() throws IOException {
  long totalBytes = 0;
  FSDataOutputStream fout = createFSOutput(path, fs);
  try {
    HFileContext context = new HFileContextBuilder()
        .withBlockSize(options.minBlockSize)
        .withCompression(AbstractHFileWriter.compressionByName(options.compress))
        .build();
    Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withOutputStream(fout)
        .withFileContext(context)
        .withComparator(new KeyValue.RawBytesComparator())
        .create();
    try {
      BytesWritable key = new BytesWritable();
      BytesWritable val = new BytesWritable();
      timer.start();
      for (long i = 0; true; ++i) {
        if (i % 1000 == 0) { // test the size for every 1000 rows.
          if (fs.getFileStatus(path).getLen() >= options.fileSize) {
            break;
          }
        }
        kvGen.next(key, val, false);
        byte[] k = new byte[key.getLength()];
        System.arraycopy(key.getBytes(), 0, k, 0, key.getLength());
        byte[] v = new byte[val.getLength()];
        System.arraycopy(val.getBytes(), 0, v, 0, val.getLength());
        KeyValue kv = new KeyValue(k, CF, QUAL, v);
        writer.append(kv);
        totalBytes += kv.getKeyLength();
        totalBytes += kv.getValueLength();
      }
      timer.stop();
    } finally {
      writer.close();
    }
  } finally {
    fout.close();
  }
  double duration = (double) timer.read() / 1000; // in us.
  long fsize = fs.getFileStatus(path).getLen();
  System.out.printf(
      "time: %s...uncompressed: %.2fMB...raw thrpt: %.2fMB/s\n",
      timer.toString(), (double) totalBytes / 1024 / 1024, totalBytes
          / duration);
  System.out.printf("time: %s...file size: %.2fMB...disk thrpt: %.2fMB/s\n",
      timer.toString(), (double) fsize / 1024 / 1024, fsize / duration);
}