

Java StoreUtils Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.StoreUtils. If you are wondering what StoreUtils is for, how to use it, or where to find real-world examples, the curated class code examples below should help.


The StoreUtils class belongs to the org.apache.hadoop.hbase.regionserver package. A total of 15 code examples of the StoreUtils class are shown below, sorted by popularity by default.
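
To orient the reader before the examples, here is a minimal sketch of the three StoreUtils methods that appear below. The class and method names in the sketch are illustrative; it assumes the HBase 2.x API used in Examples 4, 9, and 10 (HStoreFile, OptionalInt, OptionalLong), while the older releases in the other examples take StoreFile collections and return a nullable Integer instead.

import java.util.Collection;
import java.util.OptionalInt;
import java.util.OptionalLong;

import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.StoreUtils;

public class StoreUtilsTour {
  static void inspect(Collection<HStoreFile> files) {
    // True if any file is a reference file left behind by a region split.
    boolean afterSplit = StoreUtils.hasReferences(files);

    // Stable per-store seed, used to spread major compactions over time
    // instead of triggering them all at once after a restart.
    OptionalInt seed = StoreUtils.getDeterministicRandomSeed(files);

    // Highest sequence id across the given files; empty for an empty list.
    OptionalLong maxSeqId = StoreUtils.getMaxSequenceIdInList(files);

    System.out.printf("afterSplit=%b seed=%s maxSeqId=%s%n",
        afterSplit, seed, maxSeqId);
  }
}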

Example 1: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
@Override
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    List<StoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  
  if(forceMajor){
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, 
      mayUseOffPeak, forceMajor);
  }
  
  // Select only the expired store files; an empty selection means nothing to compact.
  Collection<StoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  CompactionRequest result = new CompactionRequest(toCompact);
  return result;
}
 
Author: fengchen8086; Project: ditb; Lines: 21; Source file: FIFOCompactionPolicy.java
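
Examples 1 and 4 call a getExpiredStores helper that this page does not show. As a rough, hypothetical sketch of the idea (not the project's actual code), it could sit inside FIFOCompactionPolicy like this, assuming storeConfigInfo exposes getStoreFileTtl() and the file reader exposes getMaxTimestamp() (imports: java.util.ArrayList, java.util.Collection, org.apache.hadoop.hbase.util.EnvironmentEdgeManager):

private Collection<StoreFile> getExpiredStores(Collection<StoreFile> files,
    Collection<StoreFile> filesCompacting) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  long maxTtl = storeConfigInfo.getStoreFileTtl();   // assumed accessor
  Collection<StoreFile> expired = new ArrayList<StoreFile>();
  for (StoreFile sf : files) {
    // Newest cell timestamp in the file; assumed accessor.
    Long maxTs = sf.getReader().getMaxTimestamp();
    if (maxTs == null || maxTtl == Long.MAX_VALUE || currentTime - maxTtl < maxTs) {
      continue; // TTL disabled, or the file still holds live data
    }
    if (filesCompacting == null || !filesCompacting.contains(sf)) {
      expired.add(sf); // fully expired and not already being compacted
    }
  }
  return expired;
}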

Example 2: getNextMajorCompactTime

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
/**
 * @param filesToCompact the store files under consideration; used to derive a deterministic jitter seed
 * @return the delay in milliseconds before the next major compaction, or 0 if none should be scheduled
 */
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        // Synchronized to ensure one user of random instance at a time.
        double rnd = -1;
        synchronized (this) {
          this.random.setSeed(seed);
          rnd = this.random.nextDouble();
        }
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // If seed is null, then no storefiles == no major compaction
      }
    }
  }
  return ret;
}
 
Author: fengchen8086; Project: ditb; Lines: 30; Source file: RatioBasedCompactionPolicy.java
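
To make the jitter arithmetic concrete, here is a self-contained sketch using the default values named in the comments above (24 h period, 20% jitter); the seed 42 merely stands in for the per-store seed. Because the seed is fixed, every restart computes the same due time, which is exactly how deterministic jitter avoids a compaction storm:

import java.util.Random;

public class JitterDemo {
  public static void main(String[] args) {
    long period = 24L * 60 * 60 * 1000;           // 24 h in ms (default period)
    double jitterPct = 0.2;                       // default 20% jitter
    long jitter = Math.round(period * jitterPct); // +/- 4.8 h

    Random random = new Random(42);               // stands in for the per-store seed
    double rnd = random.nextDouble();             // uniform in [0, 1)

    // period + jitter - 2*jitter*rnd lies in (period - jitter, period + jitter]
    long next = period + jitter - Math.round(2L * jitter * rnd);
    System.out.println("next major compaction due in " + next + " ms");
  }
}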

Example 3: getNextMajorCompactTime

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        double rnd = (new Random(seed)).nextDouble();
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // no storefiles == no major compaction
      }
    }
  }
  return ret;
}
 
Author: tenggyut; Project: HIndex; Lines: 21; Source file: RatioBasedCompactionPolicy.java
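
The only difference from Example 2 is how the random number is produced: Example 2 reuses a shared Random instance under synchronization, while this version allocates a fresh new Random(seed) per call, trading a small allocation for lock-free code. Since Random.setSeed(seed) resets the generator to the same state as new Random(seed), both variants yield the same jitter for the same seed.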

Example 4: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
@Override
public CompactionRequestImpl selectCompaction(Collection<HStoreFile> candidateFiles,
    List<HStoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  if(forceMajor){
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction, 
      mayUseOffPeak, forceMajor);
  }

  // Select only the expired store files; an empty selection means nothing to compact.
  Collection<HStoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  CompactionRequestImpl result = new CompactionRequestImpl(toCompact);
  return result;
}
 
Author: apache; Project: hbase; Lines: 20; Source file: FIFOCompactionPolicy.java
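
This is the same FIFO logic as Example 1, ported to the HBase 2.x internals: StoreFile becomes HStoreFile and CompactionRequest becomes CompactionRequestImpl, while the split delegation and expired-file selection are unchanged.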

Example 5: getNextMajorCompactTime

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
public long getNextMajorCompactTime(final List<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        double rnd = (new Random(seed)).nextDouble();
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // no storefiles == no major compaction
      }
    }
  }
  return ret;
}
 
Author: daidong; Project: DominoHBase; Lines: 21; Source file: CompactionPolicy.java

Example 6: isMajorCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
@Override
public boolean isMajorCompaction(Collection<StoreFile> filesToCompact) throws IOException {
  boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.isMajorCompaction(filesToCompact);
  }
  return false;
}
 
Author: fengchen8086; Project: ditb; Lines: 10; Source file: FIFOCompactionPolicy.java

Example 7: needsCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
@Override
public boolean needsCompaction(Collection<StoreFile> storeFiles, 
    List<StoreFile> filesCompacting) {  
  boolean isAfterSplit = StoreUtils.hasReferences(storeFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.needsCompaction(storeFiles, filesCompacting);
  }
  return hasExpiredStores(storeFiles);
}
 
Author: fengchen8086; Project: ditb; Lines: 11; Source file: FIFOCompactionPolicy.java

Example 8: needsCompactions

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
public boolean needsCompactions(StripeInformationProvider si, List<StoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
 
Author: fengchen8086; Project: ditb; Lines: 8; Source file: StripeCompactionPolicy.java

Example 9: needEmptyFile

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
private boolean needEmptyFile(CompactionRequestImpl request) {
  // if we are going to compact the last N files, then we need to emit an empty file to retain the
  // maxSeqId if we haven't written out anything.
  OptionalLong maxSeqId = StoreUtils.getMaxSequenceIdInList(request.getFiles());
  OptionalLong storeMaxSeqId = store.getMaxSequenceId();
  return maxSeqId.isPresent() && storeMaxSeqId.isPresent() &&
      maxSeqId.getAsLong() == storeMaxSeqId.getAsLong();
}
 
Author: apache; Project: hbase; Lines: 9; Source file: DateTieredCompactor.java
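
In other words: when the files selected for compaction include the one carrying the store's maximum sequence id, and the compaction would otherwise produce no output at all, an empty store file must still be written so that the maximum sequence id is retained (HBase uses it, among other things, to decide which WAL edits can safely be skipped on replay).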

Example 10: getNextMajorCompactTime

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
/**
 * @param filesToCompact the store files under consideration; used to derive a deterministic jitter seed
 * @return the delay in milliseconds before the next major compaction, or 0 if none should be scheduled
 */
public long getNextMajorCompactTime(Collection<HStoreFile> filesToCompact) {
  /** Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_PERIOD}. */
  long period = comConf.getMajorCompactionPeriod();
  if (period <= 0) {
    return period;
  }

  /**
   * Default to {@link org.apache.hadoop.hbase.HConstants#DEFAULT_MAJOR_COMPACTION_JITTER},
   * that is, +/- 3.5 days (7 days * 0.5).
   */
  double jitterPct = comConf.getMajorCompactionJitter();
  if (jitterPct <= 0) {
    return period;
  }

  // deterministic jitter avoids a major compaction storm on restart
  OptionalInt seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
  if (seed.isPresent()) {
    // Synchronized to ensure one user of random instance at a time.
    double rnd;
    synchronized (this) {
      this.random.setSeed(seed.getAsInt());
      rnd = this.random.nextDouble();
    }
    long jitter = Math.round(period * jitterPct);
    return period + jitter - Math.round(2L * jitter * rnd);
  } else {
    return 0L;
  }
}
 
Author: apache; Project: hbase; Lines: 36; Source file: SortedCompactionPolicy.java
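
This is the modern apache/hbase counterpart of Examples 2 and 3: the nullable Integer seed becomes an OptionalInt, the nesting is flattened with early returns, and the jitter formula itself is unchanged.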

Example 11: shouldPerformMajorCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
@Override
public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
  throws IOException {
  boolean isAfterSplit = StoreUtils.hasReferences(filesToCompact);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.shouldPerformMajorCompaction(filesToCompact);
  }
  return false;
}
 
Author: apache; Project: hbase; Lines: 11; Source file: FIFOCompactionPolicy.java

Example 12: needsCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
@Override
public boolean needsCompaction(Collection<HStoreFile> storeFiles,
    List<HStoreFile> filesCompacting) {
  boolean isAfterSplit = StoreUtils.hasReferences(storeFiles);
  if(isAfterSplit){
    LOG.info("Split detected, delegate to the parent policy.");
    return super.needsCompaction(storeFiles, filesCompacting);
  }
  return hasExpiredStores(storeFiles);
}
 
Author: apache; Project: hbase; Lines: 11; Source file: FIFOCompactionPolicy.java

Example 13: needsCompactions

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
public boolean needsCompactions(StripeInformationProvider si, List<HStoreFile> filesCompacting) {
  // Approximation on whether we need compaction.
  return filesCompacting.isEmpty()
      && (StoreUtils.hasReferences(si.getStorefiles())
        || (si.getLevel0Files().size() >= this.config.getLevel0MinFiles())
        || needsSingleStripeCompaction(si));
}
 
Author: apache; Project: hbase; Lines: 8; Source file: StripeCompactionPolicy.java

Example 14: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
/**
 * @param candidateFiles candidate files, ordered from oldest to newest
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactSelection selectCompaction(List<StoreFile> candidateFiles,
    boolean isUserCompaction, boolean forceMajor)
  throws IOException {
  // Preliminary compaction, subject to filters
  CompactSelection candidateSelection = new CompactSelection(candidateFiles);
  long cfTtl = this.storeConfig.getStoreFileTtl();
  if (!forceMajor) {
    // If there are expired files, only select them so that compaction deletes them
    if (comConf.shouldDeleteExpired() && (cfTtl != Long.MAX_VALUE)) {
      CompactSelection expiredSelection = selectExpiredStoreFiles(
        candidateSelection, EnvironmentEdgeManager.currentTimeMillis() - cfTtl);
      if (expiredSelection != null) {
        return expiredSelection;
      }
    }
    candidateSelection = skipLargeFiles(candidateSelection);
  }

  // Force a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested
  // as a major compaction.
  // Or, if there are any references among the candidates.
  boolean majorCompaction = (
    (forceMajor && isUserCompaction)
    || ((forceMajor || isMajorCompaction(candidateSelection.getFilesToCompact()))
        && (candidateSelection.getFilesToCompact().size() < comConf.getMaxFilesToCompact()))
    || StoreUtils.hasReferences(candidateSelection.getFilesToCompact())
    );

  if (!majorCompaction) {
    // we're doing a minor compaction, let's see what files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection =
      removeExcessFiles(candidateSelection, isUserCompaction, majorCompaction);
  return candidateSelection;
}
 
Author: daidong; Project: DominoHBase; Lines: 45; Source file: CompactionPolicy.java

Example 15: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreUtils; // import the required package/class
/**
 * @param candidateFiles candidate files, ordered from oldest to newest. All files in store.
 * @return subset copy of candidate list that meets compaction criteria
 * @throws java.io.IOException
 */
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    final List<StoreFile> filesCompacting, final boolean isUserCompaction,
    final boolean mayUseOffPeak, final boolean forceMajor) throws IOException {
  // Preliminary compaction subject to filters
  ArrayList<StoreFile> candidateSelection = new ArrayList<StoreFile>(candidateFiles);
  // Stuck and not compacting enough (estimate). It is not guaranteed that we will be
  // able to compact more if stuck and compacting, because ratio policy excludes some
  // non-compacting files from consideration during compaction (see getCurrentEligibleFiles).
  int futureFiles = filesCompacting.isEmpty() ? 0 : 1;
  boolean mayBeStuck = (candidateFiles.size() - filesCompacting.size() + futureFiles)
      >= storeConfigInfo.getBlockingFileCount();
  candidateSelection = getCurrentEligibleFiles(candidateSelection, filesCompacting);
  LOG.debug("Selecting compaction from " + candidateFiles.size() + " store files, " +
      filesCompacting.size() + " compacting, " + candidateSelection.size() +
      " eligible, " + storeConfigInfo.getBlockingFileCount() + " blocking");

  // If we can't have all files, we cannot do major anyway
  boolean isAllFiles = candidateFiles.size() == candidateSelection.size();
  if (!(forceMajor && isAllFiles)) {
    candidateSelection = skipLargeFiles(candidateSelection, mayUseOffPeak);
    isAllFiles = candidateFiles.size() == candidateSelection.size();
  }

  // Try a major compaction if this is a user-requested major compaction,
  // or if we do not have too many files to compact and this was requested as a major compaction
  boolean isTryingMajor = (forceMajor && isAllFiles && isUserCompaction)
      || (((forceMajor && isAllFiles) || isMajorCompaction(candidateSelection))
        && (candidateSelection.size() < comConf.getMaxFilesToCompact()));
  // Or, if there are any references among the candidates.
  boolean isAfterSplit = StoreUtils.hasReferences(candidateSelection);
  if (!isTryingMajor && !isAfterSplit) {
    // We're not compacting all files, so let's see which files are applicable
    candidateSelection = filterBulk(candidateSelection);
    candidateSelection = applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
    candidateSelection = checkMinFilesCriteria(candidateSelection);
  }
  candidateSelection = removeExcessFiles(candidateSelection, isUserCompaction, isTryingMajor);
  // Now we have the final file list, so we can determine if we can do major/all files.
  isAllFiles = (candidateFiles.size() == candidateSelection.size());
  CompactionRequest result = new CompactionRequest(candidateSelection);
  result.setOffPeak(!candidateSelection.isEmpty() && !isAllFiles && mayUseOffPeak);
  result.setIsMajor(isTryingMajor && isAllFiles, isAllFiles);
  return result;
}
 
Author: fengchen8086; Project: ditb; Lines: 50; Source file: RatioBasedCompactionPolicy.java
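
Compared with Example 14, this newer variant adds stuck-store detection (mayBeStuck, derived from the blocking file count), off-peak awareness, and an explicit isAllFiles check so that a major compaction is only attempted when every store file is eligible; the underlying filter pipeline (skip large files, filter bulk-loaded files, apply the ratio policy, enforce the minimum file count, trim excess files) is the same.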


Note: The org.apache.hadoop.hbase.regionserver.StoreUtils class examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not reproduce without permission.