本文整理匯總了Java中org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue方法的典型用法代碼示例。如果您正苦於以下問題:Java KeyValue.KeyOnlyKeyValue方法的具體用法?Java KeyValue.KeyOnlyKeyValue怎麽用?Java KeyValue.KeyOnlyKeyValue使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.hbase.KeyValue
的用法示例。
在下文中一共展示了KeyValue.KeyOnlyKeyValue方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。
示例1: binarySearch
import org.apache.hadoop.hbase.KeyValue; //導入方法依賴的package包/類
/**
 * Binary search for a key among an array of serialized HBase keys.
 *
 * @param arr array of serialized keys (each a full key byte[]) to search in;
 *        assumed sorted according to {@code comparator}
 * @param key the key you want to find
 * @param comparator comparator used to order {@code key} against the entries of {@code arr}
 * @return zero-based index of the key, if the key is present in the array.
 *         Otherwise, a value -(i + 1) such that the key is between arr[i -
 *         1] and arr[i] non-inclusively, where i is in [0, i], if we define
 *         arr[-1] = -Inf and arr[N] = Inf for an N-element array. The above
 *         means that this function can return 2N + 1 different values
 *         ranging from -(N + 1) to N - 1.
 */
public static int binarySearch(byte[][] arr, Cell key, RawComparator<Cell> comparator) {
  int low = 0;
  int high = arr.length - 1;
  // Reusable key-only Cell wrapper so each probe avoids allocating a new object.
  KeyValue.KeyOnlyKeyValue r = new KeyValue.KeyOnlyKeyValue();
  while (low <= high) {
    // Unsigned shift avoids int overflow for large (low + high).
    int mid = (low+high) >>> 1;
    // we have to compare in this order, because the comparator order
    // has special logic when the 'left side' is a special key.
    r.setKey(arr[mid], 0, arr[mid].length);
    int cmp = comparator.compare(key, r);
    // key lives above the midpoint
    if (cmp > 0)
      low = mid + 1;
    // key lives below the midpoint
    else if (cmp < 0)
      high = mid - 1;
    // exact match
    else
      return mid;
  }
  // Not found: encode the insertion point as -(insertionPoint + 1).
  return - (low+1);
}
示例2: seekBefore
import org.apache.hadoop.hbase.KeyValue; //導入方法依賴的package包/類
/**
 * Positions this scanner on the last cell strictly before the given key.
 *
 * @param key the key to seek before
 * @return false if there is no cell before {@code key} (i.e. {@code key} is at or
 *         before the first key of the file); true if the scanner was positioned
 * @throws IOException if the underlying block cannot be read
 */
@Override
public boolean seekBefore(Cell key) throws IOException {
  // Ask the data-block index which block could contain the key.
  HFileBlock seekToBlock = reader.getDataBlockIndexReader().seekToDataBlock(key, block,
      cacheBlocks, pread, isCompaction,
      ((HFileReaderV2) reader).getEffectiveEncodingInCache(isCompaction));
  if (seekToBlock == null) {
    // Key is before the first block of the file: nothing to seek before.
    return false;
  }
  ByteBuffer firstKey = getFirstKeyInBlock(seekToBlock);
  if (reader.getComparator()
      .compareOnlyKeyPortion(
          new KeyValue.KeyOnlyKeyValue(firstKey.array(), firstKey.arrayOffset(),
              firstKey.limit()), key) >= 0) {
    // The sought key is <= the first key of this block, so the answer (if any)
    // lives in the previous block.
    long previousBlockOffset = seekToBlock.getPrevBlockOffset();
    // The key we are interested in
    if (previousBlockOffset == -1) {
      // we have a 'problem', the key we want is the first of the file.
      return false;
    }
    // It is important that we compute and pass onDiskSize to the block
    // reader so that it does not have to read the header separately to
    // figure out the size. Currently, we do not have a way to do this
    // correctly in the general case however.
    // TODO: See https://issues.apache.org/jira/browse/HBASE-14576
    int prevBlockSize = -1;
    seekToBlock = reader.readBlock(previousBlockOffset,
        prevBlockSize, cacheBlocks,
        pread, isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding());
    // TODO shortcut: seek forward in this block to the last key of the
    // block.
  }
  // NOTE(review): firstKey was read from the block the index returned, NOT from
  // the re-read previous block above — the subsequent seek appears to rely on
  // loadBlockAndSeekToKey's seekBefore=true handling to land correctly; confirm
  // against the loadBlockAndSeekToKey contract.
  Cell firstKeyInCurrentBlock = new KeyValue.KeyOnlyKeyValue(Bytes.getBytes(firstKey));
  loadBlockAndSeekToKey(seekToBlock, firstKeyInCurrentBlock, true, key, true);
  return true;
}
示例3: HalfStoreFileReader
import org.apache.hadoop.hbase.KeyValue; //導入方法依賴的package包/類
/**
 * Creates a half file reader for a normal hfile.
 * @param fs filesystem to read from
 * @param p path to hfile
 * @param cacheConf block cache configuration used when reading the file
 * @param r original reference file (contains top or bottom)
 * @param conf Configuration
 * @throws IOException if the underlying reader cannot be opened
 */
public HalfStoreFileReader(final FileSystem fs, final Path p,
    final CacheConfig cacheConf, final Reference r, final Configuration conf)
    throws IOException {
  super(fs, p, cacheConf, conf);
  // This is not actual midkey for this half-file; its just border
  // around which we split top and bottom. Have to look in files to find
  // actual last and first keys for bottom and top halves. Half-files don't
  // have an actual midkey themselves. No midkey is how we indicate file is
  // not splittable.
  this.splitkey = r.getSplitKey();
  // Key-only Cell view over the split key, for comparator-based checks.
  this.splitCell = new KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length);
  // Is it top or bottom half?
  this.top = Reference.isTopFileRegion(r.getFileRegion());
}
示例4: seekToOrBeforeUsingPositionAtOrBefore
import org.apache.hadoop.hbase.KeyValue; //導入方法依賴的package包/類
/**
 * Convenience overload: wraps raw key bytes in a key-only Cell and delegates to
 * the Cell-based variant.
 *
 * @param keyOnlyBytes serialized key bytes
 * @param offset offset of the key within {@code keyOnlyBytes}
 * @param length length of the key
 * @param seekBefore whether to position strictly before the key
 * @return the result of the Cell-based overload
 */
protected int seekToOrBeforeUsingPositionAtOrBefore(byte[] keyOnlyBytes, int offset, int length,
    boolean seekBefore) {
  // The CellSearcher interface wants a Cell, so wrap the byte[] (this deep-copies it).
  return seekToOrBeforeUsingPositionAtOrBefore(
      new KeyValue.KeyOnlyKeyValue(keyOnlyBytes, offset, length), seekBefore);
}
示例5: seekToOrBeforeUsingPositionAtOrAfter
import org.apache.hadoop.hbase.KeyValue; //導入方法依賴的package包/類
/**
 * Convenience overload: wraps raw key bytes in a key-only Cell and delegates to
 * the Cell-based variant.
 *
 * @param keyOnlyBytes serialized key bytes
 * @param offset offset of the key within {@code keyOnlyBytes}
 * @param length length of the key
 * @param seekBefore whether to position strictly before the key
 * @return the result of the Cell-based overload
 */
protected int seekToOrBeforeUsingPositionAtOrAfter(byte[] keyOnlyBytes, int offset, int length,
    boolean seekBefore) {
  // The CellSearcher interface wants a Cell, so wrap the byte[] (this deep-copies it).
  return seekToOrBeforeUsingPositionAtOrAfter(
      new KeyValue.KeyOnlyKeyValue(keyOnlyBytes, offset, length), seekBefore);
}
示例6: invalidate
import org.apache.hadoop.hbase.KeyValue; //導入方法依賴的package包/類
/**
 * Clears the current decoding position so the next seek starts from scratch.
 */
protected void invalidate() {
  // Drop the buffer reference and install a fresh empty key holder.
  currentBuffer = null;
  currentKey = new KeyValue.KeyOnlyKeyValue();
  // Reset per-cell bookkeeping to its initial state.
  valueOffset = -1;
  tagsCompressedLength = 0;
  uncompressTags = true;
}