

Java StoreUtils.hasReferences Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.StoreUtils.hasReferences. If you have been asking yourself what StoreUtils.hasReferences does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.StoreUtils.


The sections below present 13 code examples of the StoreUtils.hasReferences method, sorted by popularity by default.
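Before the examples, a minimal sketch of a direct call may help orient you. StoreUtils.hasReferences returns true if any file in the given collection is a reference (half) file left behind by a region split; as the examples below show, compaction policies use this to detect the post-split state and delegate or force a rewrite. The wrapper class and helper method here are illustrative assumptions, not taken from any project below; the signature matches the HBase 1.x examples (the HBase 2.x examples use Collection<HStoreFile> instead).

import java.util.Collection;

import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreUtils;

public class HasReferencesSketch {

  // Hypothetical helper: a custom policy may only run its own selection when
  // no candidate is a post-split reference file; otherwise it should defer to
  // a policy that knows how to compact reference files away.
  public static boolean canUseCustomSelection(Collection<StoreFile> storeFiles) {
    // hasReferences returns true if any file in the collection is a
    // reference (half) file created by a region split.
    return !StoreUtils.hasReferences(storeFiles);
  }
}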

Example 1: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
@Override
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    List<StoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  
  if(forceMajor){
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, 
      mayUseOffPeak, forceMajor);
  }
  
  // Only expired store files are selected; the request may be empty (nothing to compact)
  Collection<StoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  CompactionRequest result = new CompactionRequest(toCompact);
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 21, Source: FIFOCompactionPolicy.java

Example 2: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
@Override
public CompactionRequestImpl selectCompaction(Collection<HStoreFile> candidateFiles,
    List<HStoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  if(forceMajor){
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, 
      mayUseOffPeak, forceMajor);
  }

  // Only expired store files are selected; the request may be empty (nothing to compact)
  Collection<HStoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  CompactionRequestImpl result = new CompactionRequestImpl(toCompact);
  return result;
}
 
Developer: apache, Project: hbase, Lines: 20, Source: FIFOCompactionPolicy.java

Example 3: isMajorCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
@Override
public boolean isMajorCompaction(Collection<StoreFile> filesToCompact) throws IOException {
  boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.isMajorCompaction(filesToCompact);
  }
  return false;
}
 
Developer: fengchen8086, Project: ditb, Lines: 10, Source: FIFOCompactionPolicy.java

Example 4: needsCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
@Override
public boolean needsCompaction(Collection<StoreFile> storeFiles, 
    List<StoreFile> filesCompacting) {  
  boolean isAfterSplit = StoreUtils.hasReferences(storeFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.needsCompaction(storeFiles, filesCompacting);
  }
  return hasExpiredStores(storeFiles);
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: FIFOCompactionPolicy.java

Example 5: needsCompactions

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
 
Developer: fengchen8086, Project: ditb, Lines: 8, Source: StripeCompactionPolicy.java

Example 6: shouldPerformMajorCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
@Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
  throws IOException {
  boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.shouldPerformMajorCompaction(filesToCompact);
  }
  return false;
}
 
Developer: apache, Project: hbase, Lines: 11, Source: FIFOCompactionPolicy.java

Example 7: needsCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
@Override
public boolean needsCompaction(Collection<HStoreFile> storeFiles,
    List<HStoreFile> filesCompacting) {
  boolean isAfterSplit = StoreUtils.hasReferences(storeFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.needsCompaction(storeFiles, filesCompacting);
  }
  return hasExpiredStores(storeFiles);
}
 
Developer: apache, Project: hbase, Lines: 11, Source: FIFOCompactionPolicy.java

Example 8: needsCompactions

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
public boolean needsCompactions(StripeInformationProvider si, List<HStoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
 
Developer: apache, Project: hbase, Lines: 8, Source: StripeCompactionPolicy.java

Example 9: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
/**
 * @param candidateFiles candidate files, ordered from oldest to newest
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactSelection selectCompaction(List<StoreFile> candidateFiles,
    boolean isUserCompaction, boolean forceMajor)
  throws IOException {
  // Preliminary compaction subject to filters
  CompactSelection candidateSelection = new CompactSelection(candidateFiles);
  long cfTtl = this.storeConfig.getStoreFileTtl();
  if (!forceMajor) {
    // If there are expired files, only select them so that compaction deletes them
    if (comConf.shouldDeleteExpired() && (cfTtl != Long.MAX_VALUE)) {
      CompactSelection expiredSelection = selectExpiredStoreFiles(
        candidateSelection, EnvironmentEdgeManager.currentTimeMillis() - cfTtl);
      if (expiredSelection != null) {
        return expiredSelection;
      }
    }
    candidateSelection = skipLargeFiles(candidateSelection);
  }

  // Force a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested
  // as a major compaction.
  // Or, if there are any references among the candidates.
  boolean majorCompaction = (
    (forceMajor && isUserCompaction)
    || ((forceMajor || isMajorCompaction(candidateSelection.getFilesToCompact()))
        && (candidateSelection.getFilesToCompact().size() < comConf.getMaxFilesToCompact()))
    || StoreUtils.hasReferences(candidateSelection.getFilesToCompact())
    );

  if (!majorCompaction) {
    // we're doing a minor compaction, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection =
      removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction);
  return candidateSelection;
}
 
Developer: daidong, Project: DominoHBase, Lines: 45, Source: CompactionPolicy.java

Example 10: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
/**
 * @param candidateFiles candidate files, ordered from oldest to newest. All files in store.
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection, mayUseOffPeak);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);
  if (!isTryingMajor && !isAfterSplit) {
    // We're not compacting all files, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, isTryingMajor);
  // Now we have the final file list, so we can determine if we can do major/all files.
  isAllFiles = (candidateFiles.size() == candidateSelection.size());
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);
  return result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 50, Source: RatioBasedCompactionPolicy.java

Example 11: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
/**
 * @param candidateFiles candidate files, ordered from oldest to newest. All files in store.
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);
  if (!isTryingMajor && !isAfterSplit) {
    // We're not compacting all files, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, isTryingMajor);
  // Now we have the final file list, so we can determine if we can do major/all files.
  isAllFiles = (candidateFiles.size() == candidateSelection.size());
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);
  return result;
}
 
Developer: grokcoder, Project: pbase, Lines: 50, Source: RatioBasedCompactionPolicy.java

Example 12: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
/**
 * @param candidateFiles candidate files, ordered from oldest to newest
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  long cfTtl = this.storeConfigInfo.getStoreFileTtl();
  if (!forceMajor) {
    // If there are expired files, only select them so that compaction deletes them
    if (comConf.shouldDeleteExpired() && (cfTtl != Long.MAX_VALUE)) {
      ArrayList<StoreFile> expiredSelection = selectExpiredStoreFiles(
          candidateSelection, EnvironmentEdgeManager.currentTimeMillis() - cfTtl);
      if (expiredSelection != null) {
        return new CompactionRequest(expiredSelection);
      }
    }
    candidateSelection = skipLargeFiles(candidateSelection);
  }

  // Force a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested
  // as a major compaction.
  // Or, if there are any references among the candidates.
  boolean majorCompaction = (
    (forceMajor && isUserCompaction)
    || ((forceMajor || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()))
    || StoreUtils.hasReferences(candidateSelection)
    );

  if (!majorCompaction) {
    // we're doing a minor compaction, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction);
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !majorCompaction && mayUseOffPeak);
  return result;
}
 
Developer: tenggyut, Project: HIndex, Lines: 57, Source: RatioBasedCompactionPolicy.java

Example 13: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the package/class this method depends on
/**
 * @param candidateFiles candidate files, ordered from oldest to newest by seqId. We rely on
 *   DefaultStoreFileManager to sort the files by seqId to guarantee contiguous compaction based
 *   on seqId for data consistency.
 * @return subset copy of candidate list that meets compaction criteria
 */
public CompactionRequestImpl selectCompaction(Collection<HStoreFile> candidateFiles,
    List<HStoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<HStoreFile> candidateSelection = new ArrayList<>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();

  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection, mayUseOffPeak);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || shouldPerformMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);

  CompactionRequestImpl result = createCompactionRequest(candidateSelection,
    isTryingMajor || isAfterSplit, mayUseOffPeak, mayBeStuck);

  ArrayList<HStoreFile> filesToCompact = Lists.newArrayList(result.getFiles());
  removeExcessFiles(filesToCompact, isUserCompaction, isTryingMajor);
  result.updateFiles(filesToCompact);

  isAllFiles = (candidateFiles.size() == filesToCompact.size());
  result.setOffPeak(!filesToCompact.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);

  return result;
}
 
Developer: apache, Project: hbase, Lines: 52, Source: SortedCompactionPolicy.java


Note: The org.apache.hadoop.hbase.regionserver.StoreUtils.hasReferences examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's License. Do not republish without permission.