This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue. If you are wondering what KeyValue.KeyOnlyKeyValue is for or how to use it, the curated code examples below may help. You can also read further about its enclosing class, org.apache.hadoop.hbase.KeyValue.
The following shows 6 code examples of KeyValue.KeyOnlyKeyValue, ordered by popularity.
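Before the examples, here is a minimal, self-contained sketch (not taken from the listing below) of what KeyValue.KeyOnlyKeyValue does: it wraps an already-serialized key, with no value part, so the key bytes can travel through Cell-based APIs. The row/family/qualifier values are made up, and the use of KeyValue.COMPARATOR.compareOnlyKeyPortion is an assumption that targets the HBase 1.x API the examples below appear to use.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class KeyOnlyKeyValueSketch {
  public static void main(String[] args) {
    // Build a normal KeyValue, then pull out just its serialized key portion.
    KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1L, KeyValue.Type.Put);
    byte[] keyBytes = kv.getKey(); // serialized key only, no value

    // Wrap the serialized key; KeyOnlyKeyValue exposes it through the Cell API
    // without re-attaching a value.
    KeyValue.KeyOnlyKeyValue keyOnly =
        new KeyValue.KeyOnlyKeyValue(keyBytes, 0, keyBytes.length);

    // The same instance can be re-pointed at another key buffer via setKey(),
    // which is the reuse pattern Example 1 below relies on inside its loop.
    keyOnly.setKey(keyBytes, 0, keyBytes.length);

    // Comparing only the key portion against the original cell should report equality.
    int cmp = KeyValue.COMPARATOR.compareOnlyKeyPortion(keyOnly, kv);
    System.out.println("compareOnlyKeyPortion = " + cmp); // expected: 0
  }
}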
Example 1: binarySearch
import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Binary search for keys in indexes.
 *
 * @param arr array of byte arrays to search through
 * @param key the key you want to find
 * @param comparator a comparator to compare
 * @return zero-based index of the key, if the key is present in the array.
 *         Otherwise, a value -(i + 1) such that the key is between arr[i - 1]
 *         and arr[i] non-inclusively, where i is in [0, N], if we define
 *         arr[-1] = -Inf and arr[N] = Inf for an N-element array. This means
 *         the function can return 2N + 1 different values, ranging from
 *         -(N + 1) to N - 1.
 */
public static int binarySearch(byte[][] arr, Cell key, RawComparator<Cell> comparator) {
  int low = 0;
  int high = arr.length - 1;
  KeyValue.KeyOnlyKeyValue r = new KeyValue.KeyOnlyKeyValue();
  while (low <= high) {
    int mid = (low + high) >>> 1;
    // we have to compare in this order, because the comparator order
    // has special logic when the 'left side' is a special key.
    r.setKey(arr[mid], 0, arr[mid].length);
    int cmp = comparator.compare(key, r);
    if (cmp > 0) {
      // key lives above the midpoint
      low = mid + 1;
    } else if (cmp < 0) {
      // key lives below the midpoint
      high = mid - 1;
    } else {
      // exact match
      return mid;
    }
  }
  return -(low + 1);
}
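A hedged usage sketch for the method above. Its signature matches the Bytes.binarySearch overload in HBase 1.x, so the sketch calls it through that class (an assumption based on the signature); the two-entry blockKeys array and the probe cells are made up for illustration, and KeyValue.COMPARATOR is assumed to satisfy the RawComparator<Cell> parameter as it does in 1.x.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class BinarySearchSketch {
  public static void main(String[] args) {
    // Two serialized keys, already sorted ascending, standing in for the
    // first keys of two data blocks in an index.
    KeyValue k1 = new KeyValue(Bytes.toBytes("apple"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1L, KeyValue.Type.Put);
    KeyValue k2 = new KeyValue(Bytes.toBytes("mango"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1L, KeyValue.Type.Put);
    byte[][] blockKeys = new byte[][] { k1.getKey(), k2.getKey() };

    // Exact hit on the second entry: expected result 1.
    int hit = Bytes.binarySearch(blockKeys, k2, KeyValue.COMPARATOR);

    // A key sorting after every entry: expected -(insertionPoint + 1) = -3;
    // callers decode this back to the block that must contain the key.
    KeyValue probe = new KeyValue(Bytes.toBytes("zebra"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), 1L, KeyValue.Type.Put);
    int miss = Bytes.binarySearch(blockKeys, probe, KeyValue.COMPARATOR);

    System.out.println("hit=" + hit + " miss=" + miss);
  }
}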
Example 2: seekBefore
import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
@Override
public boolean seekBefore(Cell key) throws IOException {
  HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, block,
      cacheBlocks, pread, isCompaction,
      ((HFileReaderV2) reader).getEffectiveEncodingInCache(isCompaction));
  if (seekToBlock == null) {
    return false;
  }
  ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock);

  if (reader.getComparator()
      .compareOnlyKeyPortion(
          new KeyValue.KeyOnlyKeyValue(firstKey.array(), firstKey.arrayOffset(),
              firstKey.limit()), key) >= 0) {
    long previousBlockOffset = seekToBlock.getPrevBlockOffset();
    // The key we are interested in
    if (previousBlockOffset == -1) {
      // we have a 'problem', the key we want is the first of the file.
      return false;
    }

    // It is important that we compute and pass onDiskSize to the block
    // reader so that it does not have to read the header separately to
    // figure out the size. Currently, we do not have a way to do this
    // correctly in the general case however.
    // TODO: See https://issues.apache.org/jira/browse/HBASE-14576
    int prevBlockSize = -1;
    seekToBlock = reader.readBlock(previousBlockOffset,
        prevBlockSize, cacheBlocks,
        pread, isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding());
    // TODO shortcut: seek forward in this block to the last key of the
    // block.
  }
  Cell firstKeyInCurrentBlock = new KeyValue.KeyOnlyKeyValue(Bytes.getBytes(firstKey));
  loadBlockAndSeekToKey(seekToBlock, firstKeyInCurrentBlock, true, key, true);
  return true;
}
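A note on the control flow above: seekToDataBlock locates the block that could contain `key`; if that block's first key is already at or after `key`, the cell to seek before must live in the previous block, so the code steps back via getPrevBlockOffset(). When there is no previous block (offset -1), `key` is at or before the first key of the file and there is nothing to seek before, hence the false return. In both comparisons the raw first-key bytes are wrapped in a KeyValue.KeyOnlyKeyValue so they can be passed through the Cell-based comparator and seek APIs.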
Example 3: HalfStoreFileReader
import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
/**
 * Creates a half file reader for a normal hfile.
 * @param fs filesystem to read from
 * @param p path to hfile
 * @param cacheConf cache configuration
 * @param r original reference file (contains top or bottom)
 * @param conf Configuration
 * @throws IOException
 */
public HalfStoreFileReader(final FileSystem fs, final Path p,
    final CacheConfig cacheConf, final Reference r, final Configuration conf)
    throws IOException {
  super(fs, p, cacheConf, conf);
  // This is not the actual midkey for this half-file; it's just the border
  // around which we split top and bottom. Have to look in files to find
  // actual last and first keys for bottom and top halves. Half-files don't
  // have an actual midkey themselves. No midkey is how we indicate the file is
  // not splittable.
  this.splitkey = r.getSplitKey();
  this.splitCell = new KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length);
  // Is it top or bottom half?
  this.top = Reference.isTopFileRegion(r.getFileRegion());
}
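For context, a hedged sketch of how such a reader might be constructed outside the normal split/compaction code path. The paths are hypothetical, and Reference.read, CacheConfig, and close(boolean) are assumed to have the HBase 1.x signatures used here; this is a sketch, not the canonical usage.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class HalfReaderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical paths: a reference file written during a region split,
    // and the parent region's hfile it points at.
    Path referencePath = new Path("/hbase/data/default/t1/daughter/cf/hfile1.parent");
    Path parentHFile = new Path("/hbase/data/default/t1/parent/cf/hfile1");

    // The Reference carries the split key and whether this half is top or bottom.
    Reference r = Reference.read(fs, referencePath);

    HalfStoreFileReader half =
        new HalfStoreFileReader(fs, parentHFile, new CacheConfig(conf), r, conf);
    try {
      // The reader now restricts all reads to the half of the parent hfile
      // that lies on this daughter region's side of the split key.
    } finally {
      half.close(false); // do not evict cached blocks
    }
  }
}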
Example 4: seekToOrBeforeUsingPositionAtOrBefore
import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
protected int seekToOrBeforeUsingPositionAtOrBefore(byte[] keyOnlyBytes, int offset, int length,
    boolean seekBefore) {
  // this does a deep copy of the key byte[] because the CellSearcher interface wants a Cell
  KeyValue kv = new KeyValue.KeyOnlyKeyValue(keyOnlyBytes, offset, length);
  return seekToOrBeforeUsingPositionAtOrBefore(kv, seekBefore);
}
Example 5: seekToOrBeforeUsingPositionAtOrAfter
import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
protected int seekToOrBeforeUsingPositionAtOrAfter(byte[] keyOnlyBytes, int offset, int length,
    boolean seekBefore) {
  // this does a deep copy of the key byte[] because the CellSearcher
  // interface wants a Cell
  KeyValue kv = new KeyValue.KeyOnlyKeyValue(keyOnlyBytes, offset, length);
  return seekToOrBeforeUsingPositionAtOrAfter(kv, seekBefore);
}
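Examples 4 and 5 are near-identical adapters (they appear to come from a prefix-tree seeker): both wrap the caller's raw serialized key in a KeyValue.KeyOnlyKeyValue because the underlying overload expects a Cell, and they differ only in whether the underlying search positions at-or-before or at-or-after the requested key.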
Example 6: invalidate
import org.apache.hadoop.hbase.KeyValue; // import the package/class this method depends on
protected void invalidate() {
  valueOffset = -1;
  tagsCompressedLength = 0;
  currentKey = new KeyValue.KeyOnlyKeyValue();
  uncompressTags = true;
  currentBuffer = null;
}
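Example 6 resets per-cell decoder state: the value offset and tag length are cleared, the current buffer is dropped, and currentKey is replaced with a fresh, empty KeyValue.KeyOnlyKeyValue, presumably to be re-populated later via setKey in the same reuse pattern shown in Example 1.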