

Java StoreFile.Reader Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.StoreFile.Reader. If you are wondering how StoreFile.Reader is used in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.StoreFile.


Six code examples of the StoreFile.Reader method are shown below, sorted by popularity by default.

Example 1: createMockStoreFile

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.StoreFile; // the class this method depends on
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Objects;

// Note: FILENAME_LENGTH and random are fields of the enclosing MockStoreFileGenerator class.
protected StoreFile createMockStoreFile(final long sizeInBytes, final long seqId) {
  StoreFile mockSf = mock(StoreFile.class);
  StoreFile.Reader reader = mock(StoreFile.Reader.class);
  String stringPath = "/hbase/testTable/regionA/"
      + RandomStringUtils.random(FILENAME_LENGTH, 0, 0, true, true, null, random);
  Path path = new Path(stringPath);


  when(reader.getSequenceID()).thenReturn(seqId);
  when(reader.getTotalUncompressedBytes()).thenReturn(sizeInBytes);
  when(reader.length()).thenReturn(sizeInBytes);

  when(mockSf.getPath()).thenReturn(path);
  when(mockSf.excludeFromMinorCompaction()).thenReturn(false);
  when(mockSf.isReference()).thenReturn(false); // TODO: revisit when selection takes references into account
  when(mockSf.getReader()).thenReturn(reader);
  String toString = Objects.toStringHelper("MockStoreFile")
      .add("isReference", false)
      .add("fileSize", StringUtils.humanReadableInt(sizeInBytes))
      .add("seqId", seqId)
      .add("path", stringPath).toString();
  when(mockSf.toString()).thenReturn(toString);

  return mockSf;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source file: MockStoreFileGenerator.java
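As a quick illustration of how such a mock might be consumed, here is a hedged usage sketch; the test class and assertions are invented for this page and assume the generator above can be subclassed as-is:

import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.junit.Test;

// Illustrative only: assumes MockStoreFileGenerator is directly extendable.
public class MockStoreFileUsageTest extends MockStoreFileGenerator {
  @Test
  public void mockReportsConfiguredSizeAndSeqId() throws Exception {
    StoreFile sf = createMockStoreFile(1024L, 42L);
    // The stubbed reader answers with exactly the values passed in above.
    assertEquals(42L, sf.getReader().getSequenceID());
    assertEquals(1024L, sf.getReader().length());
  }
}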

Example 2: getFileDetails

import org.apache.hadoop.hbase.regionserver.StoreFile; // the class this method depends on
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 *
 * @param filesToCompact Files.
 * @param allFiles       Whether all files are included for compaction
 * @return The aggregated file details.
 */
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
    throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimeStampToKeepMVCC =
      System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);

  for (StoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
      // when isAllFiles is true, all files are compacted so we can calculate the smallest
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemstoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
    // blooms can cause progress to be miscalculated or if the user switches bloom
    // type (e.g. from ROW to ROWCOL)
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = null;
    // Get and set the real MVCCReadpoint for bulk loaded files, which is the
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    } else {
      tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file + ", keycount=" + keyCount + ", bloomtype=" + r
          .getBloomFilterType().toString() + ", size=" + TraditionalBinaryPrefix
          .long2String(r.length(), "", 1) + ", encoding=" + r.getHFileReader()
          .getDataBlockEncoding() + ", seqNum=" + seqNum + (allFiles ?
          ", earliestPutTs=" + earliestPutTs :
          ""));
    }
  }
  return fd;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 76, Source file: Compactor.java
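The FileDetails accumulator itself is not shown on this page. The following is a minimal sketch inferred purely from the fields used above; the field set matches those usages, but the initial values are assumptions and the real class in Compactor.java may differ:

/** Sketch of the per-compaction file-details accumulator; inferred, not copied. */
public static class FileDetails {
  /** Maximum key count after compaction (summed over all input files). */
  public long maxKeyCount = 0;
  /** Earliest put timestamp across all files, if calculated. */
  public long earliestPutTs = Long.MAX_VALUE; // assumed starting value
  /** Maximum sequence id among the input files. */
  public long maxSeqId = 0;
  /** Latest MVCC read point found in any of the involved store files. */
  public long maxMVCCReadpoint = 0;
  /** Maximum tags length seen in any of the files. */
  public int maxTagsLength = 0;
  /** Smallest MVCC value (max memstore TS) that must be kept. */
  public long minSeqIdToKeep = 0;
}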

Example 3: getStoreFileScanner

import org.apache.hadoop.hbase.regionserver.StoreFile; // the class this method depends on
private StoreFileScanner getStoreFileScanner(StoreFile storeFile) throws IOException {
  StoreFile.Reader r = storeFile.createReader(canUseDrop);
  r.setReplicaStoreFile(isPrimaryReplica);
  StoreFileScanner scanner = r.getStoreFileScanner(cacheBlocks, usePread, isCompaction, readPt);
  scanner.setScanQueryMatcher(matcher);
  return scanner;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 8, Source file: LMDIndexDirectStoreFileScanner.java
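A hedged sketch of how a caller might drive this helper, collecting one scanner per candidate file; the loop and the candidateFiles variable are illustrative, not taken from LMDIndexDirectStoreFileScanner:

// Illustrative caller: open a scanner for every candidate store file.
List<StoreFileScanner> scanners = new ArrayList<>();
for (StoreFile storeFile : candidateFiles) {
  scanners.add(getStoreFileScanner(storeFile));
}
// Each scanner was created against the shared read point and has the query matcher attached.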

Example 4: createFile

import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.StoreFile; // the class this method depends on
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
private static StoreFile createFile(long size) throws Exception {
  StoreFile sf = mock(StoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  StoreFile.Reader r = mock(StoreFile.Reader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong())).thenReturn(
    mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.createReader(anyBoolean())).thenReturn(r);
  when(sf.createReader()).thenReturn(r);
  return sf;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source file: TestStripeCompactionPolicy.java
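A hedged sketch of how such mocks are typically fed into policy tests; the sizes and the final comment are invented for illustration:

// Illustrative: build a skewed size distribution and hand it to the code under test.
List<StoreFile> candidates = new ArrayList<>();
for (long size : new long[] { 1000L, 900L, 100L, 50L }) {
  candidates.add(createFile(size));
}
// A stripe compaction policy under test would now select from `candidates`.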

Example 5: preStoreFileReaderOpen

import org.apache.hadoop.hbase.regionserver.StoreFile; // the class this method depends on
/**
 * Called before creation of Reader for a store file.
 * Calling {@link org.apache.hadoop.hbase.coprocessor.ObserverContext#bypass()} has no
 * effect in this hook.
 * 
 * @param ctx the environment provided by the region server
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf the cache configuration to use
 * @param r original reference file. This will be non-null only when reading a split file.
 * @param reader the base reader, if not {@code null}, from previous RegionObserver in the chain
 * @return a Reader instance to use instead of the base reader if overriding
 * default behavior, null otherwise
 * @throws IOException
 */
StoreFile.Reader preStoreFileReaderOpen(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size,
    final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException;
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 21, Source file: RegionObserver.java

Example 6: postStoreFileReaderOpen

import org.apache.hadoop.hbase.regionserver.StoreFile; // the class this method depends on
/**
 * Called after the creation of Reader for a store file.
 * 
 * @param ctx the environment provided by the region server
 * @param fs filesystem to read from
 * @param p path to the file
 * @param in {@link FSDataInputStreamWrapper}
 * @param size Full size of the file
 * @param cacheConf the cache configuration to use
 * @param r original reference file. This will be non-null only when reading a split file.
 * @param reader the base reader instance
 * @return The reader to use
 * @throws IOException
 */
StoreFile.Reader postStoreFileReaderOpen(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, long size,
    final CacheConfig cacheConf, final Reference r, StoreFile.Reader reader) throws IOException;
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 18, Source file: RegionObserver.java
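Examples 5 and 6 declare the two coprocessor hooks but show no implementation. As a combined, hedged sketch: a pass-through observer built on HBase 1.x's BaseRegionObserver (assumed to be available in this project; the class name is invented, and returning the incoming reader leaves the default behavior untouched):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.StoreFile;

// Illustrative observer: passes readers through unchanged.
public class PassThroughReaderObserver extends BaseRegionObserver {
  @Override
  public StoreFile.Reader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
      FileSystem fs, Path p, FSDataInputStreamWrapper in, long size,
      CacheConfig cacheConf, Reference r, StoreFile.Reader reader) throws IOException {
    // Returning the incoming reader (possibly null) leaves default reader creation in place.
    return reader;
  }

  @Override
  public StoreFile.Reader postStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx,
      FileSystem fs, Path p, FSDataInputStreamWrapper in, long size,
      CacheConfig cacheConf, Reference r, StoreFile.Reader reader) throws IOException {
    // Whatever is returned here is the reader the region server will actually use.
    return reader;
  }
}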


Note: The org.apache.hadoop.hbase.regionserver.StoreFile.Reader method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to its original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce this article without permission.