This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.TimeRangeTracker. If you are unsure what TimeRangeTracker is for, how to use it, or simply want to see it in context, the curated class code examples below should help.
The TimeRangeTracker class belongs to the org.apache.hadoop.hbase.regionserver package. Fifteen code examples of the class are shown below, sorted by popularity by default.
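Before the examples, here is the pattern most of them share: an HFile carries a TIMERANGE entry in its file info, and a TimeRangeTracker is used to unmarshal it and expose the minimum and maximum cell timestamps. The sketch below is a minimal, hypothetical helper (TimeRangeFileInfoExample and printTimeRange are not part of HBase) assuming the older Writable-based TimeRangeTracker API and the three-argument HFile.createReader overload used in examples 8 and 13; HBase 2.x replaces this with TimeRangeTracker.parseFrom plus getMin()/getMax(), as in examples 11 and 12.
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;

public class TimeRangeFileInfoExample {
  /** Read the TIMERANGE file-info entry of an HFile and print its [min, max] timestamps. */
  public static void printTimeRange(Configuration conf, Path hfilePath) throws Exception {
    FileSystem fs = hfilePath.getFileSystem(conf);
    HFile.Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf));
    try {
      Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
      byte[] range = fileInfo.get(Bytes.toBytes("TIMERANGE"));
      if (range == null) {
        System.out.println("no TIMERANGE entry"); // file was written without timestamp tracking
        return;
      }
      // Unmarshal the serialized tracker and read the timestamp bounds.
      TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
      Writables.copyWritable(range, timeRangeTracker);
      System.out.println(timeRangeTracker.getMinimumTimestamp() + "...."
          + timeRangeTracker.getMaximumTimestamp());
    } finally {
      reader.close();
    }
  }
}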
Example 1: LMDIndexWriter
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
public LMDIndexWriter(HStore store, Path rawDataPath, TimeRangeTracker timeRangeTracker,
String opType) {
tableRelation = store.indexTableRelation;
this.rawDataPath = rawDataPath;
this.store = store;
tracker = timeRangeTracker;
this.opType = opType;
lmdIndexParameters = store.getLMDIndexParameters();
int size = 0;
for (Map.Entry<byte[], TreeSet<byte[]>> entry : tableRelation.getIndexFamilyMap().entrySet()) {
size += entry.getValue().size();
}
dimensions = size;
int[] mins = new int[dimensions];
Arrays.fill(mins, 0);
}
Example 2: compactLMDIndex
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
/**
 * Do a minor/major compaction of the store files' LMDIndex.
 *
 * @param compactedFile the compaction output file; its index-column cells are scanned and handed
 * to an LMDIndexWriter to rebuild the index
 * @throws IOException if the compacted HFile cannot be read
 */
void compactLMDIndex(final Path compactedFile, HStore store, TimeRangeTracker timeRangeTracker)
throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug(
"Generate intermediate index file from major compaction file=" + compactedFile + " in cf="
+ store.toString());
}
HFile.Reader reader =
HFile.createReader(store.fs.getFileSystem(), compactedFile, store.cacheConf, conf);
HFileScanner scanner = reader.getScanner(false, false, true);
Queue<KeyValue> rawRecords = new LinkedList<>();
int counter = 0;
try {
scanner.seekTo();
do {
KeyValue kv = (KeyValue) scanner.getKeyValue();
if (store.indexTableRelation.isIndexColumn(kv.getFamily(), kv.getQualifier())) {
rawRecords.add(kv);
}
++counter;
} while (scanner.next());
} finally {
if (reader != null) reader.close();
}
System.out.println("in compacted file=" + compactedFile + ", number of keyvalue=" + counter
+ ", for LMDIndex is:" + rawRecords.size());
LMDIndexWriter lmdIndexWriter =
new LMDIndexWriter(store, compactedFile, timeRangeTracker, "COMPACT");
lmdIndexWriter.processKeyValueQueue(rawRecords);
}
Example 3: LCIndexWriter
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
public LCIndexWriter(HStore store, Path hdfsTmpPath, TimeRangeTracker tracker)
throws IOException {
this.store = store;
this.hdfsTmpPath = hdfsTmpPath;
this.tableRelation = store.indexTableRelation;
indexParameters = store.getLCIndexParameters();
this.statMap = LCStatInfo2.parseStatString(tableRelation,
store.getHRegion().getTableDesc().getValue(LCIndexConstant.LC_TABLE_DESC_RANGE_STR));
this.tracker = tracker;
Path dirPath = indexParameters.getTmpDirPath(hdfsTmpPath);
// create the index tmp directory if it does not exist yet
if (!indexParameters.getLCIndexFileSystem().exists(dirPath)) {
indexParameters.getLCIndexFileSystem().mkdirs(dirPath);
}
}
Example 4: FlushJob
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
public FlushJob(HStore store, LinkedList<KeyValue> keyvalueQueue, Path pathName,
TimeRangeTracker snapshotTimeRangeTracker) {
super(FlushJobQueue.getInstance().getJobQueueName());
this.store = store;
this.queue = keyvalueQueue;
this.rawHFilePath = pathName;
this.tracker = snapshotTimeRangeTracker;
printMessage("winter FlushJob construction, rawHFilePath: " + rawHFilePath);
}
Example 5: LCIndexWriter
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
public LCIndexWriter(Store store, Path rawHFile, Map<byte[], Integer> statistic,
List<LCStatInfo> rangeInfoList, TimeRangeTracker tracker) throws IOException {
this.rangeInfoList = rangeInfoList;
if (store.localfs != null) {
basePath = store.getLocalBaseDirByFileName(rawHFile);
if (!store.localfs.exists(basePath)) { // create dir
store.localfs.mkdirs(basePath);
}
} else {
basePath = store.mWinterGetLCCIndexFilePathFromHFilePathInTmp(rawHFile);
}
for (Entry<byte[], Integer> entry : statistic.entrySet()) {
if (entry.getValue() > 0) {
// targetPath = /hbase/lcc/AAA/.tmp/BBB.lccindex/Q1-Q4
Path targetPath = new Path(basePath, Bytes.toString(entry.getKey()));
StoreFile.Writer writer = null;
if (store.localfs != null) {
writer =
store.mWinterCreateWriterInLocalTmp(entry.getValue(), store.family.getCompression(),
false, true, targetPath);
writeStatInfo(store.localfs, basePath, Bytes.toString(entry.getKey()));
} else {
writer =
store.mWinterCreateWriterInTmp(entry.getValue(), store.family.getCompression(),
false, true, targetPath);
writeStatInfo(store.fs, basePath, Bytes.toString(entry.getKey()));
}
if (tracker != null) {
writer.setTimeRangeTracker(tracker);
}
indexWriters.put(entry.getKey(), writer);
} else {
System.out.println("winter ignore cf: " + Bytes.toString(entry.getKey())
+ " bacause it contains " + entry.getValue() + " rows");
}
}
}
Example 6: FlushJob
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
public FlushJob(Store store, Queue<KeyValue> keyvalueQueue, Path pathName,
TimeRangeTracker snapshotTimeRangeTracker) {
this.store = store;
this.queue = keyvalueQueue;
this.rawPath = pathName;
this.tracker = snapshotTimeRangeTracker;
if (printForDebug) {
System.out.println("winter FlushJob construction, rawPath: " + rawPath);
}
}
Example 7: flushSnapshot
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
@Override
public List<Path> flushSnapshot(SortedSet<KeyValue> snapshot, long cacheFlushId,
TimeRangeTracker snapshotTimeRangeTracker, AtomicLong flushedSize, MonitoredTask status)
throws IOException {
if (throwExceptionWhenFlushing.get()) {
throw new IOException("Simulated exception by tests");
}
return super.flushSnapshot(snapshot, cacheFlushId, snapshotTimeRangeTracker,
flushedSize, status);
}
Example 8: test_TIMERANGE
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
@Test
public void test_TIMERANGE() throws Exception {
Configuration conf = new Configuration(this.util.getConfiguration());
RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
TaskAttemptContext context = null;
Path dir =
util.getDataTestDir("test_TIMERANGE_present");
LOG.info("Timerange dir writing to dir: "+ dir);
try {
// build a record writer using HFileOutputFormat
Job job = new Job(conf);
FileOutputFormat.setOutputPath(job, dir);
context = getTestTaskAttemptContext(job);
HFileOutputFormat hof = new HFileOutputFormat();
writer = hof.getRecordWriter(context);
// Pass two key values with explicit timestamps
final byte [] b = Bytes.toBytes("b");
// value 1 with timestamp 2000
KeyValue kv = new KeyValue(b, b, b, 2000, b);
KeyValue original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original,kv);
// value 2 with timestamp 1000
kv = new KeyValue(b, b, b, 1000, b);
original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original, kv);
// verify that the file has the proper FileInfo.
writer.close(context);
// the generated file lives 1 directory down from the attempt directory
// and is the only file, e.g.
// _attempt__0000_r_000000_0/b/1979617994050536795
FileSystem fs = FileSystem.get(conf);
Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
FileStatus[] sub1 = fs.listStatus(attemptDirectory);
FileStatus[] file = fs.listStatus(sub1[0].getPath());
// open as HFile Reader and pull out TIMERANGE FileInfo.
HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
new CacheConfig(conf));
Map<byte[],byte[]> finfo = rd.loadFileInfo();
byte[] range = finfo.get("TIMERANGE".getBytes());
assertNotNull(range);
// unmarshal and check values.
TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
Writables.copyWritable(range, timeRangeTracker);
LOG.info(timeRangeTracker.getMinimumTimestamp() +
"...." + timeRangeTracker.getMaximumTimestamp());
assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
rd.close();
} finally {
if (writer != null && context != null) writer.close(context);
dir.getFileSystem(conf).delete(dir, true);
}
}
Example 9: test_TIMERANGE
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
@Test
public void test_TIMERANGE() throws Exception {
Configuration conf = new Configuration(this.util.getConfiguration());
RecordWriter<ImmutableBytesWritable, Cell> writer = null;
TaskAttemptContext context = null;
Path dir =
util.getDataTestDir("test_TIMERANGE_present");
LOG.info("Timerange dir writing to dir: "+ dir);
try {
// build a record writer using HFileOutputFormat2
Job job = new Job(conf);
FileOutputFormat.setOutputPath(job, dir);
context = createTestTaskAttemptContext(job);
HFileOutputFormat2 hof = new HFileOutputFormat2();
writer = hof.getRecordWriter(context);
// Pass two key values with explicit timestamps
final byte [] b = Bytes.toBytes("b");
// value 1 with timestamp 2000
KeyValue kv = new KeyValue(b, b, b, 2000, b);
KeyValue original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original,kv);
// value 2 with timestamp 1000
kv = new KeyValue(b, b, b, 1000, b);
original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original, kv);
// verify that the file has the proper FileInfo.
writer.close(context);
// the generated file lives 1 directory down from the attempt directory
// and is the only file, e.g.
// _attempt__0000_r_000000_0/b/1979617994050536795
FileSystem fs = FileSystem.get(conf);
Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
FileStatus[] sub1 = fs.listStatus(attemptDirectory);
FileStatus[] file = fs.listStatus(sub1[0].getPath());
// open as HFile Reader and pull out TIMERANGE FileInfo.
HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
new CacheConfig(conf), conf);
Map<byte[],byte[]> finfo = rd.loadFileInfo();
byte[] range = finfo.get("TIMERANGE".getBytes());
assertNotNull(range);
// unmarshal and check values.
TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
Writables.copyWritable(range, timeRangeTracker);
LOG.info(timeRangeTracker.getMinimumTimestamp() +
"...." + timeRangeTracker.getMaximumTimestamp());
assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
rd.close();
} finally {
if (writer != null && context != null) writer.close(context);
dir.getFileSystem(conf).delete(dir, true);
}
}
Example 10: test_TIMERANGE
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
@Test
public void test_TIMERANGE() throws Exception {
Configuration conf = new Configuration(this.util.getConfiguration());
RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
TaskAttemptContext context = null;
Path dir =
util.getDataTestDir("test_TIMERANGE_present");
LOG.info("Timerange dir writing to dir: "+ dir);
try {
// build a record writer using HFileOutputFormat
Job job = new Job(conf);
FileOutputFormat.setOutputPath(job, dir);
context = createTestTaskAttemptContext(job);
HFileOutputFormat hof = new HFileOutputFormat();
writer = hof.getRecordWriter(context);
// Pass two key values with explicit timestamps
final byte [] b = Bytes.toBytes("b");
// value 1 with timestamp 2000
KeyValue kv = new KeyValue(b, b, b, 2000, b);
KeyValue original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original,kv);
// value 2 with timestamp 1000
kv = new KeyValue(b, b, b, 1000, b);
original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original, kv);
// verify that the file has the proper FileInfo.
writer.close(context);
// the generated file lives 1 directory down from the attempt directory
// and is the only file, e.g.
// _attempt__0000_r_000000_0/b/1979617994050536795
FileSystem fs = FileSystem.get(conf);
Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
FileStatus[] sub1 = fs.listStatus(attemptDirectory);
FileStatus[] file = fs.listStatus(sub1[0].getPath());
// open as HFile Reader and pull out TIMERANGE FileInfo.
HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
new CacheConfig(conf), conf);
Map<byte[],byte[]> finfo = rd.loadFileInfo();
byte[] range = finfo.get("TIMERANGE".getBytes());
assertNotNull(range);
// unmarshal and check values.
TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
Writables.copyWritable(range, timeRangeTracker);
LOG.info(timeRangeTracker.getMinimumTimestamp() +
"...." + timeRangeTracker.getMaximumTimestamp());
assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
rd.close();
} finally {
if (writer != null && context != null) writer.close(context);
dir.getFileSystem(conf).delete(dir, true);
}
}
Example 11: getFileDetails
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
/**
* Extracts some details about the files to compact that are commonly needed by compactors.
* @param filesToCompact Files.
* @param allFiles Whether all files are included for compaction
* @return The result.
*/
private FileDetails getFileDetails(
Collection<HStoreFile> filesToCompact, boolean allFiles) throws IOException {
FileDetails fd = new FileDetails();
long oldestHFileTimeStampToKeepMVCC = System.currentTimeMillis() -
(1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
for (HStoreFile file : filesToCompact) {
if(allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
// when isAllFiles is true, all files are compacted so we can calculate the smallest
// MVCC value to keep
if(fd.minSeqIdToKeep < file.getMaxMemStoreTS()) {
fd.minSeqIdToKeep = file.getMaxMemStoreTS();
}
}
long seqNum = file.getMaxSequenceId();
fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
StoreFileReader r = file.getReader();
if (r == null) {
LOG.warn("Null reader for " + file.getPath());
continue;
}
// NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
// blooms can cause progress to be miscalculated or if the user switches bloom
// type (e.g. from ROW to ROWCOL)
long keyCount = r.getEntries();
fd.maxKeyCount += keyCount;
// calculate the latest MVCC readpoint in any of the involved store files
Map<byte[], byte[]> fileInfo = r.loadFileInfo();
byte[] tmp = null;
// Get and set the real MVCCReadpoint for bulk loaded files, which is the
// SeqId number.
if (r.isBulkLoaded()) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
}
else {
tmp = fileInfo.get(HFile.Writer.MAX_MEMSTORE_TS_KEY);
if (tmp != null) {
fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
}
}
tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
if (tmp != null) {
fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
}
// If required, calculate the earliest put timestamp of all involved storefiles.
// This is used to remove family delete marker during compaction.
long earliestPutTs = 0;
if (allFiles) {
tmp = fileInfo.get(EARLIEST_PUT_TS);
if (tmp == null) {
// There's a file with no information, must be an old one
// assume we have very old puts
fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
} else {
earliestPutTs = Bytes.toLong(tmp);
fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
}
}
tmp = fileInfo.get(TIMERANGE_KEY);
fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax();
if (LOG.isDebugEnabled()) {
LOG.debug("Compacting " + file +
", keycount=" + keyCount +
", bloomtype=" + r.getBloomFilterType().toString() +
", size=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1) +
", encoding=" + r.getHFileReader().getDataBlockEncoding() +
", seqNum=" + seqNum +
(allFiles ? ", earliestPutTs=" + earliestPutTs: ""));
}
}
return fd;
}
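The latestPutTs computation at the end of getFileDetails above can be read in isolation: when a store file carries no TIMERANGE entry, the compactor must assume it may contain puts up to HConstants.LATEST_TIMESTAMP. A minimal sketch of just that fallback, assuming the HBase 2.x TimeRangeTracker.parseFrom(byte[]) API used above (LatestPutTsExample and latestPutTs are hypothetical names):
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;

public class LatestPutTsExample {
  /** Mirrors the fd.latestPutTs assignment above: a missing TIMERANGE means "assume the newest possible puts". */
  public static long latestPutTs(byte[] timeRangeBytes) throws IOException {
    return timeRangeBytes == null
        ? HConstants.LATEST_TIMESTAMP
        : TimeRangeTracker.parseFrom(timeRangeBytes).getMax();
  }
}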
Example 12: test_TIMERANGE
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void test_TIMERANGE() throws Exception {
Configuration conf = new Configuration(this.util.getConfiguration());
RecordWriter<ImmutableBytesWritable, Cell> writer = null;
TaskAttemptContext context = null;
Path dir =
util.getDataTestDir("test_TIMERANGE_present");
LOG.info("Timerange dir writing to dir: "+ dir);
try {
// build a record writer using HFileOutputFormat2
Job job = new Job(conf);
FileOutputFormat.setOutputPath(job, dir);
context = createTestTaskAttemptContext(job);
HFileOutputFormat2 hof = new HFileOutputFormat2();
writer = hof.getRecordWriter(context);
// Pass two key values with explicit timestamps
final byte [] b = Bytes.toBytes("b");
// value 1 with timestamp 2000
KeyValue kv = new KeyValue(b, b, b, 2000, b);
KeyValue original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original,kv);
// value 2 with timestamp 1000
kv = new KeyValue(b, b, b, 1000, b);
original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original, kv);
// verify that the file has the proper FileInfo.
writer.close(context);
// the generated file lives 1 directory down from the attempt directory
// and is the only file, e.g.
// _attempt__0000_r_000000_0/b/1979617994050536795
FileSystem fs = FileSystem.get(conf);
Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
FileStatus[] sub1 = fs.listStatus(attemptDirectory);
FileStatus[] file = fs.listStatus(sub1[0].getPath());
// open as HFile Reader and pull out TIMERANGE FileInfo.
HFile.Reader rd =
HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf);
Map<byte[],byte[]> finfo = rd.loadFileInfo();
byte[] range = finfo.get("TIMERANGE".getBytes("UTF-8"));
assertNotNull(range);
// unmarshal and check values.
TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(range);
LOG.info(timeRangeTracker.getMin() +
"...." + timeRangeTracker.getMax());
assertEquals(1000, timeRangeTracker.getMin());
assertEquals(2000, timeRangeTracker.getMax());
rd.close();
} finally {
if (writer != null && context != null) writer.close(context);
dir.getFileSystem(conf).delete(dir, true);
}
}
Example 13: printMeta
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
private void printMeta(HFile.Reader reader, Map<byte[], byte[]> fileInfo)
throws IOException {
System.out.println("Block index size as per heapsize: "
+ reader.indexSize());
System.out.println(asSeparateLines(reader.toString()));
System.out.println("Trailer:\n "
+ asSeparateLines(reader.getTrailer().toString()));
System.out.println("Fileinfo:");
for (Map.Entry<byte[], byte[]> e : fileInfo.entrySet()) {
System.out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = ");
if (Bytes.compareTo(e.getKey(), Bytes.toBytes("MAX_SEQ_ID_KEY")) == 0) {
long seqid = Bytes.toLong(e.getValue());
System.out.println(seqid);
} else if (Bytes.compareTo(e.getKey(), Bytes.toBytes("TIMERANGE")) == 0) {
TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
Writables.copyWritable(e.getValue(), timeRangeTracker);
System.out.println(timeRangeTracker.getMinimumTimestamp() + "...."
+ timeRangeTracker.getMaximumTimestamp());
} else if (Bytes.compareTo(e.getKey(), FileInfo.AVG_KEY_LEN) == 0
|| Bytes.compareTo(e.getKey(), FileInfo.AVG_VALUE_LEN) == 0) {
System.out.println(Bytes.toInt(e.getValue()));
} else {
System.out.println(Bytes.toStringBinary(e.getValue()));
}
}
System.out.println("Mid-key: " + Bytes.toStringBinary(reader.midkey()));
// Printing bloom information
DataInput bloomMeta = reader.getBloomFilterMetadata();
BloomFilter bloomFilter = null;
if (bloomMeta != null)
bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
System.out.println("Bloom filter:");
if (bloomFilter != null) {
System.out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(
ByteBloomFilter.STATS_RECORD_SEP, "\n" + FOUR_SPACES));
} else {
System.out.println(FOUR_SPACES + "Not present");
}
}
Example 14: test_TIMERANGE
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
@Test
public void test_TIMERANGE() throws Exception {
Configuration conf = new Configuration(this.util.getConfiguration());
RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
TaskAttemptContext context = null;
Path dir =
util.getDataTestDir("test_TIMERANGE_present");
LOG.info("Timerange dir writing to dir: "+ dir);
try {
// build a record writer using HFileOutputFormat
Job job = new Job(conf);
FileOutputFormat.setOutputPath(job, dir);
context = createTestTaskAttemptContext(job);
HFileOutputFormat hof = new HFileOutputFormat();
writer = hof.getRecordWriter(context);
// Pass two key values with explicit timestamps
final byte [] b = Bytes.toBytes("b");
// value 1 with timestamp 2000
KeyValue kv = new KeyValue(b, b, b, 2000, b);
KeyValue original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original,kv);
// value 2 with timestamp 1000
kv = new KeyValue(b, b, b, 1000, b);
original = kv.clone();
writer.write(new ImmutableBytesWritable(), kv);
assertEquals(original, kv);
// verify that the file has the proper FileInfo.
writer.close(context);
// the generated file lives 1 directory down from the attempt directory
// and is the only file, e.g.
// _attempt__0000_r_000000_0/b/1979617994050536795
FileSystem fs = FileSystem.get(conf);
Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent();
FileStatus[] sub1 = fs.listStatus(attemptDirectory);
FileStatus[] file = fs.listStatus(sub1[0].getPath());
// open as HFile Reader and pull out TIMERANGE FileInfo.
HFile.Reader rd = HFile.createReader(fs, file[0].getPath(),
new CacheConfig(conf));
Map<byte[],byte[]> finfo = rd.loadFileInfo();
byte[] range = finfo.get("TIMERANGE".getBytes());
assertNotNull(range);
// unmarshal and check values.
TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
Writables.copyWritable(range, timeRangeTracker);
LOG.info(timeRangeTracker.getMinimumTimestamp() +
"...." + timeRangeTracker.getMaximumTimestamp());
assertEquals(1000, timeRangeTracker.getMinimumTimestamp());
assertEquals(2000, timeRangeTracker.getMaximumTimestamp());
rd.close();
} finally {
if (writer != null && context != null) writer.close(context);
dir.getFileSystem(conf).delete(dir, true);
}
}
Example 15: printMeta
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; // import the required package/class
private void printMeta(HFile.Reader reader, Map<byte[], byte[]> fileInfo)
throws IOException {
System.out.println("Block index size as per heapsize: "
+ reader.indexSize());
System.out.println(asSeparateLines(reader.toString()));
System.out.println("Trailer:\n "
+ asSeparateLines(reader.getTrailer().toString()));
System.out.println("Fileinfo:");
for (Map.Entry<byte[], byte[]> e : fileInfo.entrySet()) {
System.out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = ");
if (Bytes.compareTo(e.getKey(), Bytes.toBytes("MAX_SEQ_ID_KEY")) == 0) {
long seqid = Bytes.toLong(e.getValue());
System.out.println(seqid);
} else if (Bytes.compareTo(e.getKey(), Bytes.toBytes("TIMERANGE")) == 0) {
TimeRangeTracker timeRangeTracker = new TimeRangeTracker();
Writables.copyWritable(e.getValue(), timeRangeTracker);
System.out.println(timeRangeTracker.getMinimumTimestamp() + "...."
+ timeRangeTracker.getMaximumTimestamp());
} else if (Bytes.compareTo(e.getKey(), FileInfo.AVG_KEY_LEN) == 0
|| Bytes.compareTo(e.getKey(), FileInfo.AVG_VALUE_LEN) == 0) {
System.out.println(Bytes.toInt(e.getValue()));
} else {
System.out.println(Bytes.toStringBinary(e.getValue()));
}
}
System.out.println("Mid-key: " + Bytes.toStringBinary(reader.midkey()));
// Printing general bloom information
DataInput bloomMeta = reader.getGeneralBloomFilterMetadata();
BloomFilter bloomFilter = null;
if (bloomMeta != null)
bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
System.out.println("Bloom filter:");
if (bloomFilter != null) {
System.out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(
ByteBloomFilter.STATS_RECORD_SEP, "\n" + FOUR_SPACES));
} else {
System.out.println(FOUR_SPACES + "Not present");
}
// Printing delete bloom information
bloomMeta = reader.getDeleteBloomFilterMetadata();
bloomFilter = null;
if (bloomMeta != null)
bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
System.out.println("Delete Family Bloom filter:");
if (bloomFilter != null) {
System.out.println(FOUR_SPACES
+ bloomFilter.toString().replaceAll(ByteBloomFilter.STATS_RECORD_SEP,
"\n" + FOUR_SPACES));
} else {
System.out.println(FOUR_SPACES + "Not present");
}
}