This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.StoreFile.getReader. If you are wondering what exactly StoreFile.getReader does, how to call it, or want to see it used in context, the curated method examples here may help. You can also explore further usage of the enclosing class, org.apache.hadoop.hbase.regionserver.StoreFile.
The two code examples of StoreFile.getReader shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
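Both examples share the same defensive pattern: StoreFile.getReader() may return null (for example, when the underlying store file has not been opened yet or has already been closed), so callers check the reader before using it. The following minimal sketch of that pattern uses only calls that also appear in the examples below; the helper name totalStoreFileSize and the surrounding static-method context are hypothetical, not part of the HBase API.

import java.util.Collection;
import org.apache.hadoop.hbase.regionserver.StoreFile;

// Hypothetical helper: sum the on-disk size of a set of store files,
// skipping any file whose reader is currently unavailable.
static long totalStoreFileSize(Collection<StoreFile> files) {
  long total = 0;
  for (StoreFile sf : files) {
    StoreFile.Reader r = sf.getReader();  // may be null if the file is not open
    if (r == null) {
      continue;                           // no open reader; skip this file
    }
    total += r.length();                  // length of the underlying file in bytes
  }
  return total;
}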
Example 1: getFileDetails
import org.apache.hadoop.hbase.regionserver.StoreFile; // import the package/class required by this method
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 *
 * @param filesToCompact Files.
 * @param allFiles Whether all files are included for compaction
 * @return The result.
 */
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
    throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimeStampToKeepMVCC =
      System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);
  for (StoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
      // when allFiles is true, all files are compacted so we can calculate the smallest
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemstoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
    // blooms can cause progress to be miscalculated or if the user switches bloom
    // type (e.g. from ROW to ROWCOL)
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = null;
    // Get and set the real MVCCReadpoint for bulk loaded files, which is the
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    } else {
      tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file + ", keycount=" + keyCount + ", bloomtype="
          + r.getBloomFilterType().toString() + ", size="
          + TraditionalBinaryPrefix.long2String(r.length(), "", 1) + ", encoding="
          + r.getHFileReader().getDataBlockEncoding() + ", seqNum=" + seqNum
          + (allFiles ? ", earliestPutTs=" + earliestPutTs : ""));
    }
  }
  return fd;
}
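Note that getFileDetails tolerates store files whose reader is unavailable: a null result from StoreFile.getReader is only logged as a warning and the file is skipped, so the returned FileDetails aggregates statistics from the readable files only.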
Example 2: recalculateSize
import org.apache.hadoop.hbase.regionserver.StoreFile; // import the package/class required by this method
/**
 * Recalculate the size of the compaction based on the files currently
 * selected for compaction ({@code this.filesToCompact}).
 */
private void recalculateSize() {
  long sz = 0;
  for (StoreFile sf : this.filesToCompact) {
    StoreFile.Reader r = sf.getReader();
    sz += r == null ? 0 : r.length();
  }
  this.totalSize = sz;
}