

Java HFileWriterV2 Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.io.hfile.HFileWriterV2. If you are wondering how the HFileWriterV2 class is used in practice, the selected examples below should help.


The HFileWriterV2 class belongs to the org.apache.hadoop.hbase.io.hfile package. Four code examples of the class are shown below, ordered by popularity.
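Every example on this page reads the HFileWriterV2.MAX_MEMSTORE_TS_KEY entry from a store file's file-info map while gathering compaction details. As a quick orientation, here is a minimal standalone sketch of that lookup, assuming HBase 0.98/1.x-era APIs; the class name MaxMemstoreTsExample and the path /tmp/demo.hfile are illustrative placeholders and are not taken from the examples below.

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV2;
import org.apache.hadoop.hbase.util.Bytes;

public class MaxMemstoreTsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/demo.hfile"); // placeholder: point this at a real HFile

    // Open the HFile and load its file-info metadata block.
    HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
    try {
      Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
      // HFileWriterV2 records the largest memstore timestamp (MVCC read point)
      // of the cells it wrote under MAX_MEMSTORE_TS_KEY.
      byte[] tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
      long maxMemstoreTs = (tmp != null) ? Bytes.toLong(tmp) : 0L;
      System.out.println("max memstore ts = " + maxMemstoreTs);
    } finally {
      reader.close();
    }
  }
}

The examples that follow perform the same lookup inside Compactor.getFileDetails(), feeding the value into fd.maxMVCCReadpoint.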

Example 1: getFileDetails

import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; // import the required class
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 *
 * @param filesToCompact Files.
 * @param allFiles       Whether all files are included for compaction
 * @return The result.
 */
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
    throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimeStampToKeepMVCC =
      System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);

  for (StoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
      // when isAllFiles is true, all files are compacted so we can calculate the smallest
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemstoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries instead of getFilterEntries when compacting; otherwise
    // under-sized blooms, or a bloom type switched by the user (e.g. from ROW to
    // ROWCOL), can cause compaction progress to be miscalculated
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = null;
    // Get and set the real MVCCReadpoint for bulk loaded files, which is the
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    } else {
      tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file + ", keycount=" + keyCount + ", bloomtype=" + r
          .getBloomFilterType().toString() + ", size=" + TraditionalBinaryPrefix
          .long2String(r.length(), "", 1) + ", encoding=" + r.getHFileReader()
          .getDataBlockEncoding() + ", seqNum=" + seqNum + (allFiles ?
          ", earliestPutTs=" + earliestPutTs :
          ""));
    }
  }
  return fd;
}
 
Developer: fengchen8086, Project: ditb, Lines: 76, Source file: Compactor.java

Example 2: getFileDetails

import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; // import the required class
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 * @param filesToCompact Files.
 * @param allFiles Whether all files are included for compaction
 * @return The result.
 */
protected FileDetails getFileDetails(
    Collection<StoreFile> filesToCompact, boolean allFiles) throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimeStampToKeepMVCC = System.currentTimeMillis() - 
    (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);  

  for (StoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
      // when isAllFiles is true, all files are compacted so we can calculate the smallest 
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemstoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries instead of getFilterEntries when compacting; otherwise
    // under-sized blooms, or a bloom type switched by the user (e.g. from ROW to
    // ROWCOL), can cause compaction progress to be miscalculated
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = null;
    // Get and set the real MVCCReadpoint for bulk loaded files, which is the
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    } else {
      tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file +
        ", keycount=" + keyCount +
        ", bloomtype=" + r.getBloomFilterType().toString() +
        ", size=" + StringUtils.humanReadableInt(r.length()) +
        ", encoding=" + r.getHFileReader().getDataBlockEncoding() +
        ", seqNum=" + seqNum +
        (allFiles ? ", earliestPutTs=" + earliestPutTs: ""));
    }
  }
  return fd;
}
 
Developer: grokcoder, Project: pbase, Lines: 77, Source file: Compactor.java

Example 3: getFileDetails

import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; // import the required class
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 * @param filesToCompact Files.
 * @param calculatePutTs Whether earliest put TS is needed.
 * @return The result.
 */
protected FileDetails getFileDetails(
    Collection<StoreFile> filesToCompact, boolean calculatePutTs) throws IOException {
  FileDetails fd = new FileDetails();

  for (StoreFile file : filesToCompact) {
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: getFilterEntries could cause under-sized blooms if the user
    // switches bloom type (e.g. from ROW to ROWCOL)
    long keyCount = (r.getBloomFilterType() == store.getFamily().getBloomFilterType())
        ? r.getFilterEntries() : r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
    if (tmp != null) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (calculatePutTs) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file +
        ", keycount=" + keyCount +
        ", bloomtype=" + r.getBloomFilterType().toString() +
        ", size=" + StringUtils.humanReadableInt(r.length()) +
        ", encoding=" + r.getHFileReader().getDataBlockEncoding() +
        ", seqNum=" + seqNum +
        (calculatePutTs ? ", earliestPutTs=" + earliestPutTs: ""));
    }
  }
  return fd;
}
 
Developer: tenggyut, Project: HIndex, Lines: 60, Source file: Compactor.java

Example 4: getFileDetails

import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; // import the required class
protected FileDetails getFileDetails(
    Collection<StoreFile> filesToCompact, boolean calculatePutTs) throws IOException {
  FileDetails fd = new FileDetails();

  for (StoreFile file : filesToCompact) {
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: getFilterEntries could cause under-sized blooms if the user
    // switches bloom type (e.g. from ROW to ROWCOL)
    long keyCount = (r.getBloomFilterType() == store.getFamily().getBloomFilterType())
        ? r.getFilterEntries() : r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte[] tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
    if (tmp != null) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (calculatePutTs) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file +
        ", keycount=" + keyCount +
        ", bloomtype=" + r.getBloomFilterType().toString() +
        ", size=" + StringUtils.humanReadableInt(r.length()) +
        ", encoding=" + r.getHFileReader().getDataBlockEncoding() +
        ", seqNum=" + seqNum +
        (calculatePutTs ? ", earliestPutTs=" + earliestPutTs: ""));
    }
  }
  return fd;
}
 
Developer: cloud-software-foundation, Project: c5, Lines: 50, Source file: Compactor.java
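
A note on the key-count calculation: Examples 1 and 2 always use r.getEntries() when estimating the total key count for compaction, as their in-code comment explains, whereas Examples 3 and 4 use r.getFilterEntries() only when the store file's bloom filter type still matches the column family's configured bloom type, falling back to r.getEntries() otherwise.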


Note: The org.apache.hadoop.hbase.io.hfile.HFileWriterV2 class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not republish without permission.