This article collects typical usage examples of the Java method org.apache.hadoop.hbase.KeyValue.createKeyValueFromKey. If you are wondering what KeyValue.createKeyValueFromKey does, how to call it, or what real code that uses it looks like, the selected samples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.hbase.KeyValue.
The following presents 5 code examples of the KeyValue.createKeyValueFromKey method, sorted by popularity by default.
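Before the examples, here is a minimal self-contained sketch of the basic round trip: serialize only the key portion of a KeyValue with getKey(), then rebuild a value-less KeyValue from those bytes with createKeyValueFromKey. This is a sketch assuming an HBase version (roughly 0.98/1.x) in which these KeyValue accessors are still available; the class name and the data values are illustrative only and do not come from the examples below.

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateKeyValueFromKeyDemo {
  public static void main(String[] args) {
    // Build a full KeyValue (row / family / qualifier / timestamp / value).
    KeyValue original = new KeyValue(
        Bytes.toBytes("row1"),
        Bytes.toBytes("cf"),
        Bytes.toBytes("q"),
        1234567890L,
        Bytes.toBytes("value"));

    // getKey() copies only the key portion (no value bytes).
    byte[] keyOnly = original.getKey();

    // createKeyValueFromKey() wraps those key bytes as a value-less KeyValue,
    // so the row, qualifier and timestamp can be read back out of them.
    KeyValue keyOnlyKV = KeyValue.createKeyValueFromKey(keyOnly);
    System.out.println(Bytes.toString(keyOnlyKV.getRow()));        // row1
    System.out.println(Bytes.toString(keyOnlyKV.getQualifier()));  // q
    System.out.println(keyOnlyKV.getTimestamp());                  // 1234567890
  }
}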
Example 1: testCreateKey
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
@Test
public void testCreateKey() {
  CompoundBloomFilterBase cbfb = new CompoundBloomFilterBase();
  byte[] row = "myRow".getBytes();
  byte[] qualifier = "myQualifier".getBytes();
  byte[] rowKey = cbfb.createBloomKey(row, 0, row.length,
      row, 0, 0);
  byte[] rowColKey = cbfb.createBloomKey(row, 0, row.length,
      qualifier, 0, qualifier.length);
  KeyValue rowKV = KeyValue.createKeyValueFromKey(rowKey);
  KeyValue rowColKV = KeyValue.createKeyValueFromKey(rowColKey);
  assertEquals(rowKV.getTimestamp(), rowColKV.getTimestamp());
  assertEquals(Bytes.toStringBinary(rowKV.getRow()),
      Bytes.toStringBinary(rowColKV.getRow()));
  assertEquals(0, rowKV.getQualifier().length);
}
Example 2: getFileSplitPoint
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
/**
 * Gets the approximate mid-point of this file that is optimal for use in splitting it.
 *
 * @param comparator Comparator used to compare KVs.
 * @return The split point row, or null if splitting is not possible, or reader is null.
 */
@SuppressWarnings("deprecation")
byte[] getFileSplitPoint(KVComparator comparator) throws IOException {
  if (this.reader == null) {
    LOG.warn("Storefile " + this + " Reader is null; cannot get split point");
    return null;
  }
  // Get first, last, and mid keys. Midkey is the key that starts block
  // in middle of hfile. Has column and timestamp. Need to return just
  // the row we want to split on as midkey.
  byte[] midkey = this.reader.midkey();
  if (midkey != null) {
    KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length);
    byte[] fk = this.reader.getFirstKey();
    KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
    byte[] lk = this.reader.getLastKey();
    KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
    // if the midkey is the same as the first or last keys, we cannot (ever) split this region.
    if (comparator.compareRows(mk, firstKey) == 0 || comparator.compareRows(mk, lastKey) == 0) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("cannot split because midkey is the same as first or last row");
      }
      return null;
    }
    return mk.getRow();
  }
  return null;
}
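For readers who only need the decision rule, the same check can be distilled into a standalone helper. The following is a sketch, not part of the StoreFile API above: it assumes the key-only byte arrays (first, last and mid keys) have already been read from the reader, and that the same KeyValue.KVComparator is available.

// Hypothetical standalone helper; names are illustrative.
static byte[] splitRowOrNull(KeyValue.KVComparator comparator,
    byte[] firstKey, byte[] lastKey, byte[] midKey) {
  if (firstKey == null || lastKey == null || midKey == null) {
    return null; // nothing to decide on
  }
  KeyValue mk = KeyValue.createKeyValueFromKey(midKey, 0, midKey.length);
  KeyValue fk = KeyValue.createKeyValueFromKey(firstKey, 0, firstKey.length);
  KeyValue lk = KeyValue.createKeyValueFromKey(lastKey, 0, lastKey.length);
  // If the mid row equals the first or last row, every key shares that row and
  // the file cannot be split without breaking the row across two regions.
  if (comparator.compareRows(mk, fk) == 0 || comparator.compareRows(mk, lk) == 0) {
    return null;
  }
  return mk.getRow(); // split on the row only, never on a full key
}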
Example 3: testHalfScanAndReseek
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
/**
 * Test the scanner and reseek of a half hfile scanner. The scanner API
 * demands that seekTo and reseekTo() only return < 0 if the key lies
 * before the start of the file (with no position on the scanner), return
 * 0 on a perfect match (rare), and return 1 on an imperfect match.
 *
 * The latter case being the most common, we should generally be returning 1,
 * and if we do, there may or may not be a 'next' in the scanner/file.
 *
 * A bug in the half file scanner was returning -1 at the end of the bottom
 * half, and that was causing the infrastructure above to go null, causing NPEs
 * and other problems. This test reproduces that failure, and also exercises
 * both the bottom and top of the file while we are at it.
 *
 * @throws IOException
 */
@Test
public void testHalfScanAndReseek() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, p)
      .withFileContext(meta)
      .create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  byte[] midkey = r.midkey();
  KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
  midkey = midKV.getRow();
  // System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));

  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  doTestOfScanAndReseek(p, fs, bottom, cacheConf);

  Reference top = new Reference(midkey, Reference.Range.top);
  doTestOfScanAndReseek(p, fs, top, cacheConf);

  r.close();
}
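The genSomeKeys() helper used above (and again in Example 5) is not included in this excerpt. A plausible minimal sketch follows, assuming java.util.ArrayList and the usual HBase KeyValue/Bytes imports; the row count, column names and values are chosen purely for illustration.

// Hypothetical reconstruction of the missing helper; values are illustrative.
List<KeyValue> genSomeKeys() {
  List<KeyValue> ret = new ArrayList<KeyValue>(1000);
  for (int i = 0; i < 1000; i++) {
    // Zero-padded rows keep the byte-wise sort order equal to the numeric order.
    KeyValue kv = new KeyValue(
        Bytes.toBytes(String.format("row_%04d", i)),
        Bytes.toBytes("family"),
        Bytes.toBytes("qualifier"),
        1000,  // fixed timestamp
        Bytes.toBytes("value"));
    ret.add(kv);
  }
  return ret;
}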
Example 4: testReference
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
/**
 * Test that our mechanism of writing store files in one region to reference
 * store files in other regions works.
 * @throws IOException
 */
public void testReference() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
      conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();

  // Make a store file and write data to it.
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(meta)
      .build();
  writeStoreFile(writer);

  Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
  StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
      BloomType.NONE);
  StoreFile.Reader reader = hsf.createReader();

  // Split on a row, not in middle of row. Midkey returned by reader
  // may be in middle of row. Create new one with empty column and
  // timestamp.
  KeyValue kv = KeyValue.createKeyValueFromKey(reader.midkey());
  byte[] midRow = kv.getRow();
  kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
  byte[] finalRow = kv.getRow();
  hsf.closeReader(true);

  // Make a reference
  HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
  Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
  StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
      BloomType.NONE);

  // Now confirm that I can read from the reference and that it only gets
  // keys from top half of the file.
  HFileScanner s = refHsf.createReader().getScanner(false, false);
  for (boolean first = true; (!s.isSeeked() && s.seekTo()) || s.next();) {
    ByteBuffer bb = s.getKey();
    kv = KeyValue.createKeyValueFromKey(bb);
    if (first) {
      assertTrue(Bytes.equals(kv.getRow(), midRow));
      first = false;
    }
  }
  assertTrue(Bytes.equals(kv.getRow(), finalRow));
}
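The comment about splitting "on a row, not in middle of row" is the key detail here: the reader's midkey can point anywhere inside a row, so only the row bytes are kept and, when a proper boundary key is needed, one can be rebuilt from the row alone. A small sketch of that normalization, assuming a StoreFile.Reader named reader as in the example above:

// midkey() returns a full key that may fall in the middle of a row.
byte[] midKeyBytes = reader.midkey();
byte[] splitRow = KeyValue.createKeyValueFromKey(midKeyBytes).getRow();
// Rebuild a clean boundary key from the row only (empty family/qualifier,
// latest timestamp), so region boundaries never cut through a row.
KeyValue splitBoundary = KeyValue.createFirstOnRow(splitRow);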
Example 5: testHalfScanner
import org.apache.hadoop.hbase.KeyValue; // import the package/class the method depends on
@Test
public void testHalfScanner() throws IOException {
  String root_dir = TEST_UTIL.getDataTestDir().toString();
  Path p = new Path(root_dir, "test");
  Configuration conf = TEST_UTIL.getConfiguration();
  FileSystem fs = FileSystem.get(conf);
  CacheConfig cacheConf = new CacheConfig(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(1024).build();
  HFile.Writer w = HFile.getWriterFactory(conf, cacheConf)
      .withPath(fs, p)
      .withFileContext(meta)
      .create();

  // write some things.
  List<KeyValue> items = genSomeKeys();
  for (KeyValue kv : items) {
    w.append(kv);
  }
  w.close();

  HFile.Reader r = HFile.createReader(fs, p, cacheConf, conf);
  r.loadFileInfo();
  byte[] midkey = r.midkey();
  KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
  midkey = midKV.getRow();

  Reference bottom = new Reference(midkey, Reference.Range.bottom);
  Reference top = new Reference(midkey, Reference.Range.top);

  // Ugly code to get the item before the midkey
  KeyValue beforeMidKey = null;
  for (KeyValue item : items) {
    if (KeyValue.COMPARATOR.compare(item, midKV) >= 0) {
      break;
    }
    beforeMidKey = item;
  }
  System.out.println("midkey: " + midKV + " or: " + Bytes.toStringBinary(midkey));
  System.out.println("beforeMidKey: " + beforeMidKey);

  // Seek on the splitKey, should be in top, not in bottom
  Cell foundKeyValue = doTestOfSeekBefore(p, fs, bottom, midKV, cacheConf);
  assertEquals(beforeMidKey, foundKeyValue);

  // Seek to the last thing: it should be the penultimate item on the top,
  // and the one before the midkey on the bottom.
  foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(items.size() - 1), cacheConf);
  assertEquals(items.get(items.size() - 2), foundKeyValue);
  foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(items.size() - 1), cacheConf);
  assertEquals(beforeMidKey, foundKeyValue);

  // Try and seek before something that is in the bottom.
  foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(0), cacheConf);
  assertNull(foundKeyValue);

  // Try and seek before the first thing.
  foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(0), cacheConf);
  assertNull(foundKeyValue);

  // Try and seek before the second thing in the top and bottom.
  foundKeyValue = doTestOfSeekBefore(p, fs, top, items.get(1), cacheConf);
  assertNull(foundKeyValue);
  foundKeyValue = doTestOfSeekBefore(p, fs, bottom, items.get(1), cacheConf);
  assertEquals(items.get(0), foundKeyValue);

  // Try to seek before the splitKey in the top file
  foundKeyValue = doTestOfSeekBefore(p, fs, top, midKV, cacheConf);
  assertNull(foundKeyValue);
}
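doTestOfSeekBefore() is another helper the excerpt does not show. The sketch below is a plausible reconstruction only, hedged because the exact signatures (Cell vs. KeyValue arguments, the HalfStoreFileReader constructor) differ between HBase versions; it opens one half of the file through the given Reference, seeks before the supplied key, and returns whatever the scanner lands on.

// Hypothetical reconstruction of the missing helper; verify signatures against
// the HBase version actually in use.
private Cell doTestOfSeekBefore(Path p, FileSystem fs, Reference half, KeyValue seekBefore,
    CacheConfig cacheConfig) throws IOException {
  HalfStoreFileReader halfReader = new HalfStoreFileReader(fs, p, cacheConfig,
      half, TEST_UTIL.getConfiguration());
  halfReader.loadFileInfo();
  HFileScanner scanner = halfReader.getScanner(false, false);
  scanner.seekBefore(seekBefore);
  // Whatever the scanner is positioned on after seekBefore(), or null if it
  // could not position before the given key.
  return scanner.getKeyValue();
}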