

Java StoreFile Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.StoreFile. If you are wondering what the StoreFile class does, how to use it, or what working examples look like, the curated examples below should help.


The StoreFile class belongs to the org.apache.hadoop.hbase.regionserver package. Fifteen code examples of the class are shown below, sorted by popularity by default.

Example 1: deleteStoreFilesWithoutArchiving

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * fewer resources, but is more limited in usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. Deletion is attempted for every file
 *           before the exception is thrown, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      LOG.error("Failed to delete store file:" + hsf.getPath());
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 28, Source: HFileArchiver.java
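
A note on the pattern itself: the method attempts every deletion, collects the failures, and only then throws one aggregated exception, so a single bad file does not leave the rest undeleted. Here is a minimal, self-contained sketch of the same best-effort pattern using plain java.nio rather than HBase types; the BestEffortDelete class and deleteAll method are hypothetical names for illustration.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

public class BestEffortDelete {

  /** Tries to delete every path; failures are collected instead of aborting the loop. */
  static void deleteAll(Collection<Path> paths) throws IOException {
    List<IOException> errors = new ArrayList<>();
    for (Path p : paths) {
      try {
        Files.deleteIfExists(p);
      } catch (IOException e) {
        errors.add(e); // keep going; report all failures at the end
      }
    }
    if (!errors.isEmpty()) {
      // Aggregate like MultipleIOException: one exception carrying every cause.
      IOException combined = new IOException(errors.size() + " deletion(s) failed");
      for (IOException e : errors) {
        combined.addSuppressed(e);
      }
      throw combined;
    }
  }
}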

Example 2: iterator

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
@Override
public final Iterator<List<StoreFile>> iterator() {
  return new Iterator<List<StoreFile>>() {
    private int nextIndex = 0;
    @Override
    public boolean hasNext() {
      return nextIndex < fileSizes.length;
    }

    @Override
    public List<StoreFile> next() {
      List<StoreFile> files = createStoreFileList(fileSizes[nextIndex]);
      nextIndex += 1;
      return files;
    }

    @Override
    public void remove() {
      // Intentionally a no-op: this iterator's file lists do not support removal.
    }
  };
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 22, Source: ExplicitFileListGenerator.java

Example 3: filesInRatio

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
/**
 * Check that every file satisfies the constraint
 *      FileSize(i) <= (Sum(FileSize(0..N)) - FileSize(i)) * Ratio.
 *
 * @param files List of store files to consider as a compaction candidate.
 * @param currentRatio The ratio to use.
 * @return true if every file satisfies the ratio constraint.
 */
private boolean filesInRatio(final List<StoreFile> files, final double currentRatio) {
  if (files.size() < 2) {
    return true;
  }

  long totalFileSize = getTotalStoreSize(files);

  for (StoreFile file : files) {
    long singleFileSize = file.getReader().length();
    long sumAllOtherFileSizes = totalFileSize - singleFileSize;

    if (singleFileSize > sumAllOtherFileSizes * currentRatio) {
      return false;
    }
  }
  return true;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 26, Source: ExploringCompactionPolicy.java
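
To make the ratio constraint concrete, here is a standalone check with made-up sizes (the RatioCheckDemo class is a hypothetical illustration, not HBase code). With sizes 100, 50, and 25 and a ratio of 1.2, the 100-unit file violates the constraint because 100 > (50 + 25) * 1.2 = 90, so the candidate set is rejected.

public class RatioCheckDemo {

  /** Same constraint as filesInRatio above: each size must be <= (sum of the others) * ratio. */
  static boolean filesInRatio(long[] sizes, double ratio) {
    if (sizes.length < 2) {
      return true;
    }
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      if (s > (total - s) * ratio) {
        return false; // this file dwarfs the others; compacting it now would waste IO
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(filesInRatio(new long[] {100, 50, 25}, 1.2)); // false: 100 > 90
    System.out.println(filesInRatio(new long[] {40, 50, 25}, 1.2));  // true: all within ratio
  }
}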

Example 4: createCompactJob

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
private CompactJobQueue.CompactJob createCompactJob(final CompactionRequest request,
    final Path writtenPath, HStore store) throws IOException {
  // Reference files are not supported yet; if any are present, rebuild instead of compacting normally.
  boolean needToRebuild = false;
  for (StoreFile sf : request.getFiles()) {
    if (sf.getPath().getName().indexOf(".") != -1 || sf.isReference()) {
      needToRebuild = true;
      break;
    }
  }
  CompactJobQueue.CompactJob job;
  if (needToRebuild) {
    job = new CompactJobQueue.RebuildCompactJob(store, request, writtenPath);
  } else {
    job = new CompactJobQueue.NormalCompactJob(store, request, writtenPath);
  }
  return job;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: DefaultCompactor.java

Example 5: selectCompaction

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
@Override
public CompactionRequest selectCompaction(Collection<StoreFile> candidateFiles,
    List<StoreFile> filesCompacting, boolean isUserCompaction, boolean mayUseOffPeak,
    boolean forceMajor) throws IOException {
  
  if (forceMajor) {
    LOG.warn("Major compaction is not supported for FIFO compaction policy. Ignore the flag.");
  }
  boolean isAfterSplit = StoreUtils.hasReferences(candidateFiles);
  if (isAfterSplit) {
    LOG.info("Split detected, delegate selection to the parent policy.");
    return super.selectCompaction(candidateFiles, filesCompacting, isUserCompaction,
      mayUseOffPeak, forceMajor);
  }

  // Select only fully expired files; the resulting request may be empty.
  Collection<StoreFile> toCompact = getExpiredStores(candidateFiles, filesCompacting);
  return new CompactionRequest(toCompact);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: FIFOCompactionPolicy.java

Example 6: hasExpiredStores

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
private boolean hasExpiredStores(Collection<StoreFile> files) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  for (StoreFile sf : files) {
    // MIN_VERSIONS is checked in HStore#removeUnneededFiles
    Long maxTs = sf.getReader().getMaxTimestamp();
    long maxTtl = storeConfigInfo.getStoreFileTtl();
    // A file survives while currentTime - maxTtl < maxTs; unknown timestamps
    // and an unlimited TTL (Long.MAX_VALUE) never expire.
    if (maxTs == null || maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
      continue;
    }
    return true;
  }
  return false;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 17, Source: FIFOCompactionPolicy.java

Example 7: getExpiredStores

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
private Collection<StoreFile> getExpiredStores(Collection<StoreFile> files,
    Collection<StoreFile> filesCompacting) {
  long currentTime = EnvironmentEdgeManager.currentTime();
  Collection<StoreFile> expiredStores = new ArrayList<StoreFile>();
  for (StoreFile sf : files) {
    // MIN_VERSIONS is checked in HStore#removeUnneededFiles
    Long maxTs = sf.getReader().getMaxTimestamp();
    long maxTtl = storeConfigInfo.getStoreFileTtl();
    if (maxTs == null || maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
      continue; // not yet expired
    } else if (filesCompacting == null || !filesCompacting.contains(sf)) {
      expiredStores.add(sf);
    }
  }
  return expiredStores;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: FIFOCompactionPolicy.java
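
The skip condition shared by Examples 5-7 is easier to read rearranged: a file survives while currentTime - maxTtl < maxTs, so it expires once maxTs + maxTtl <= currentTime, i.e. its newest cell has outlived the TTL. A standalone sketch with made-up timestamps (TtlExpiryDemo is a hypothetical illustration, not HBase API):

import java.util.concurrent.TimeUnit;

public class TtlExpiryDemo {

  /** Mirrors the FIFO policy check: expired once the newest cell is older than the TTL. */
  static boolean isExpired(Long maxTimestamp, long ttlMillis, long now) {
    if (maxTimestamp == null || ttlMillis == Long.MAX_VALUE) {
      return false; // unknown timestamp or unlimited TTL: never expires
    }
    return now - ttlMillis >= maxTimestamp;
  }

  public static void main(String[] args) {
    long now = 1_700_000_000_000L;          // an arbitrary wall-clock instant
    long ttl = TimeUnit.DAYS.toMillis(1);   // one-day TTL
    System.out.println(isExpired(now - ttl - 1, ttl, now)); // true: just past the TTL
    System.out.println(isExpired(now - ttl + 1, ttl, now)); // false: still inside the TTL
    System.out.println(isExpired(null, ttl, now));          // false: timestamp unknown
  }
}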

Example 8: iterator

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
@Override
public final Iterator<List<StoreFile>> iterator() {
  return new Iterator<List<StoreFile>>() {
    private int count = 0;

    @Override
    public boolean hasNext() {
      return count < MAX_FILE_GEN_ITERS;
    }

    @Override
    public List<StoreFile> next() {
      count += 1;
      ArrayList<StoreFile> files = new ArrayList<StoreFile>(NUM_FILES_GEN);
      for (int i = 0; i < NUM_FILES_GEN; i++) {
        files.add(createMockStoreFile(FILESIZE));
      }
      return files;
    }

    @Override
    public void remove() {
      // Intentionally a no-op: generated file lists do not support removal.
    }
  };
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: ConstantSizeFileListGenerator.java

Example 9: removeExcessFiles

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
/**
 * Takes at most maxFilesToCompact files from the start of the list.
 * @param candidates pre-filtered compaction candidates
 * @return the filtered subset
 */
private ArrayList<StoreFile> removeExcessFiles(ArrayList<StoreFile> candidates,
    boolean isUserCompaction, boolean isMajorCompaction) {
  int excess = candidates.size() - comConf.getMaxFilesToCompact();
  if (excess > 0) {
    if (isMajorCompaction && isUserCompaction) {
      LOG.debug("Warning, compacting more than " + comConf.getMaxFilesToCompact() +
          " files because of a user-requested major compaction");
    } else {
      LOG.debug("Too many admissible files. Excluding " + excess
        + " files from compaction candidates");
      candidates.subList(comConf.getMaxFilesToCompact(), candidates.size()).clear();
    }
  }
  return candidates;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: RatioBasedCompactionPolicy.java
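
The truncation line candidates.subList(max, size()).clear() works because subList returns a live view backed by the original list, so clearing the view removes the tail in place. A minimal demonstration with plain collections (SubListTruncateDemo is a hypothetical illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SubListTruncateDemo {
  public static void main(String[] args) {
    List<String> candidates = new ArrayList<>(Arrays.asList("f1", "f2", "f3", "f4", "f5"));
    int maxFilesToCompact = 3;
    int excess = candidates.size() - maxFilesToCompact;
    if (excess > 0) {
      // subList is a live view, so clearing it shrinks the backing list.
      candidates.subList(maxFilesToCompact, candidates.size()).clear();
    }
    System.out.println(candidates); // [f1, f2, f3]
  }
}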

Example 10: getNextMajorCompactTime

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
/**
 * @param filesToCompact files being considered for compaction; used to derive a
 *          deterministic jitter seed
 * @return when to run the next major compaction, in milliseconds; 0 if there are no store files
 */
public long getNextMajorCompactTime(final Collection<StoreFile> filesToCompact) {
  // default = 24hrs
  long ret = comConf.getMajorCompactionPeriod();
  if (ret > 0) {
    // default = 20% = +/- 4.8 hrs
    double jitterPct = comConf.getMajorCompactionJitter();
    if (jitterPct > 0) {
      long jitter = Math.round(ret * jitterPct);
      // deterministic jitter avoids a major compaction storm on restart
      Integer seed = StoreUtils.getDeterministicRandomSeed(filesToCompact);
      if (seed != null) {
        // Synchronized to ensure one user of random instance at a time.
        double rnd = -1;
        synchronized (this) {
          this.random.setSeed(seed);
          rnd = this.random.nextDouble();
        }
        ret += jitter - Math.round(2L * jitter * rnd);
      } else {
        ret = 0; // If seed is null, then no storefiles == no major compaction
      }
    }
  }
  return ret;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 30, Source: RatioBasedCompactionPolicy.java
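
The jitter arithmetic keeps the result inside the window [period - jitter, period + jitter]: with rnd in [0, 1), ret + jitter - 2*jitter*rnd sweeps that whole range, and seeding the generator from the store files keeps the offset stable across restarts. A standalone sketch with illustrative numbers (the 7-day period, 0.5 jitter fraction, and fixed seed below are assumptions for the demo, not HBase defaults):

import java.util.Random;

public class CompactionJitterDemo {
  public static void main(String[] args) {
    long period = 7L * 24 * 60 * 60 * 1000; // assumed 7-day major compaction period
    double jitterPct = 0.5;                 // assumed jitter fraction
    long jitter = Math.round(period * jitterPct);

    Random random = new Random();
    random.setSeed(42);                     // stands in for the file-derived deterministic seed
    double rnd = random.nextDouble();

    long next = period + jitter - Math.round(2L * jitter * rnd);
    // rnd in [0, 1) => next in (period - jitter, period + jitter]
    System.out.println("next major compaction in " + next + " ms");
    System.out.println("window: [" + (period - jitter) + ", " + (period + jitter) + "] ms");
  }
}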

Example 11: estimateTargetKvs

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
private Pair<Long, Integer> estimateTargetKvs(Collection<StoreFile> files, double splitCount) {
  // If the size is larger than what we target, we don't want to split into proportionally
  // larger parts and then have to split again very soon. So, we will increase the multiplier
  // by one until we get small enough parts. E.g. 5Gb stripe that should have been split into
  // 2 parts when it was 3Gb will be split into 3x1.67Gb parts, rather than 2x2.5Gb parts.
  long totalSize = getTotalFileSize(files);
  long targetPartSize = config.getSplitPartSize();
  assert targetPartSize > 0 && splitCount > 0;
  double ratio = totalSize / (splitCount * targetPartSize); // ratio of real to desired size
  while (ratio > 1.0) {
    // Ratio of real to desired size if we increase the multiplier.
    double newRatio = totalSize / ((splitCount + 1.0) * targetPartSize);
    if ((1.0 / newRatio) >= ratio) break; // New ratio is < 1.0, but further than the last one.
    ratio = newRatio;
    splitCount += 1.0;
  }
  long kvCount = (long)(getTotalKvCount(files) / splitCount);
  return new Pair<Long, Integer>(kvCount, (int)Math.ceil(splitCount));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 20, Source: StripeCompactionPolicy.java
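
The comment's own numbers can be traced through the loop: with a 5 GB total and a 1.5 GB target part size (implied by "2 parts at 3 GB"), the ratio starts at 5 / (2 * 1.5) ≈ 1.67, the split count grows to 3 (ratio ≈ 1.11), and the loop stops before 4 because 1 / 0.83 ≥ 1.11, yielding three parts of about 1.67 GB each. A standalone replay of just that arithmetic (SplitCountDemo is a hypothetical illustration):

public class SplitCountDemo {

  /** Replays the multiplier loop from estimateTargetKvs on plain numbers. */
  static double adjustSplitCount(long totalSize, long targetPartSize, double splitCount) {
    double ratio = totalSize / (splitCount * targetPartSize);
    while (ratio > 1.0) {
      double newRatio = totalSize / ((splitCount + 1.0) * targetPartSize);
      if ((1.0 / newRatio) >= ratio) {
        break; // another part would undershoot the target more than we overshoot now
      }
      ratio = newRatio;
      splitCount += 1.0;
    }
    return splitCount;
  }

  public static void main(String[] args) {
    long gb = 1L << 30;
    double parts = adjustSplitCount(5 * gb, (long) (1.5 * gb), 2.0);
    System.out.println(parts);                  // 3.0
    System.out.println(5.0 * gb / parts / gb);  // ~1.67 GB per part
  }
}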

Example 12: initRowKeyList

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
private List<byte[]> initRowKeyList(FileSystem fileSystem, CacheConfig cacheConf,
    Configuration conf, TreeMap<byte[], TreeSet<byte[]>> indexFamilyMap,
    ScanRange.ScanRangeList rangeList) throws IOException {
  // open the temporary bucket and secondary index store files
  StoreFile bucketStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpBucketFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFile secondaryStoreFile =
      new StoreFile(fileSystem, LMDIndexParameters.getTmpSecondaryFilePath(file.getPath()), conf,
          cacheConf, BloomType.NONE);
  StoreFileScanner bucketScanner = getStoreFileScanner(bucketStoreFile);
  StoreFileScanner secondaryScanner = getStoreFileScanner(secondaryStoreFile);
  // get hit buckets
  MDRange[] ranges = getRanges(indexFamilyMap, rangeList);
  List<LMDBucket> bucketList = getBucketRanges(bucketScanner, ranges);
  // scan rowkeys based on the buckets
  List<byte[]> rowkeyList = getRawRowkeyList(secondaryScanner, bucketList, ranges);
  // close the scanners and store file readers
  bucketScanner.close();
  bucketStoreFile.closeReader(true);
  secondaryScanner.close();
  secondaryStoreFile.closeReader(true);
  return rowkeyList;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: LMDIndexDirectStoreFileScanner.java

Example 13: testSelection

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
@Test
public final void testSelection() throws Exception {
  long fileDiff = 0;
  for (List<StoreFile> storeFileList : generator) {
    List<StoreFile> currentFiles = new ArrayList<StoreFile>(18);
    for (StoreFile file : storeFileList) {
      currentFiles.add(file);
      currentFiles = runIteration(currentFiles);
    }
    fileDiff += (storeFileList.size() - currentFiles.size());
  }

  // Print tab-delimited output so it can be pasted into Excel/Google Docs.
  System.out.println(
          cp.getClass().getSimpleName()
          + "\t" + fileGenClass.getSimpleName()
          + "\t" + max
          + "\t" + min
          + "\t" + ratio
          + "\t" + written
          + "\t" + fileDiff
  );
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 24, Source: PerfTestCompactionPolicies.java

Example 14: testWithReferences

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
@Test
public void testWithReferences() throws Exception {
  StripeCompactionPolicy policy = createPolicy(HBaseConfiguration.create());
  StripeCompactor sc = mock(StripeCompactor.class);
  StoreFile ref = createFile();
  when(ref.isReference()).thenReturn(true);
  StripeInformationProvider si = mock(StripeInformationProvider.class);
  Collection<StoreFile> sfs = al(ref, createFile());
  when(si.getStorefiles()).thenReturn(sfs);

  assertTrue(policy.needsCompactions(si, al()));
  StripeCompactionPolicy.StripeCompactionRequest scr = policy.selectCompaction(si, al(), false);
  assertEquals(si.getStorefiles(), scr.getRequest().getFiles());
  scr.execute(sc, NoLimitCompactionThroughputController.INSTANCE, null);
  verify(sc, only()).compact(eq(scr.getRequest()), anyInt(), anyLong(), aryEq(OPEN_KEY),
    aryEq(OPEN_KEY), aryEq(OPEN_KEY), aryEq(OPEN_KEY),
    any(NoLimitCompactionThroughputController.class), any(User.class));
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 19, Source: TestStripeCompactionPolicy.java

Example 15: runIteration

import org.apache.hadoop.hbase.regionserver.StoreFile; // import the required package/class
private List<StoreFile> runIteration(List<StoreFile> startingStoreFiles) throws IOException {
  List<StoreFile> storeFiles = new ArrayList<StoreFile>(startingStoreFiles);
  CompactionRequest req = cp.selectCompaction(
      storeFiles, new ArrayList<StoreFile>(), false, false, false);
  long newFileSize = 0;

  Collection<StoreFile> filesToCompact = req.getFiles();

  if (!filesToCompact.isEmpty()) {
    storeFiles = new ArrayList<StoreFile>(storeFiles);
    storeFiles.removeAll(filesToCompact);

    for (StoreFile storeFile : filesToCompact) {
      newFileSize += storeFile.getReader().length();
    }

    storeFiles.add(createMockStoreFileBytes(newFileSize));
  }

  written += newFileSize;
  return storeFiles;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 25, Source: PerfTestCompactionPolicies.java


Note: The org.apache.hadoop.hbase.regionserver.StoreFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright belongs to the original authors, and distribution or use of the code should follow the corresponding project's license. Do not reproduce without permission.