

Java CompactionRequest.getFiles Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest.getFiles. If you are unsure what CompactionRequest.getFiles does, how to use it, or are looking for working examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest.


The sections below present 6 code examples of the CompactionRequest.getFiles method, ordered by popularity by default.
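
Before the examples, here is a minimal sketch of what the method exposes: CompactionRequest.getFiles() returns the store files selected for a compaction. The helper class CompactionRequestSummary and its describe method are hypothetical and not part of any project shown on this page; the sketch assumes the 0.94/0.98-era HBase API used throughout the examples, where getFiles() returns a collection of StoreFile and each StoreFile exposes getPath().

import java.util.Collection;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

// Hypothetical helper: summarize which store files a compaction request will rewrite.
public final class CompactionRequestSummary {

  static String describe(CompactionRequest request) {
    Collection<StoreFile> files = request.getFiles();
    StringBuilder sb = new StringBuilder("compacting ").append(files.size()).append(" file(s):");
    for (StoreFile sf : files) {
      Path hfilePath = sf.getPath(); // HFile path of one selected store file
      sb.append(' ').append(hfilePath.getName());
    }
    return sb.toString();
  }
}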

Example 1: mWinterCompactLCCIndexLocal

import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the package/class this method depends on
CompactJob mWinterCompactLCCIndexLocal(final CompactionRequest request, final Path writtenPath)
    throws IOException {
  // check for reference files: they are not supported yet, so their presence forces a rebuild
  boolean needToRebuild = false;
  for (StoreFile sf : request.getFiles()) {
    if (sf.getPath().getName().indexOf(".") != -1 || sf.isReference()) {
      needToRebuild = true;
      break;
    }
  }
  CompactJob job = null;
  if (needToRebuild) {
    job = new RebuildCompactJob(request.getStore(), request, writtenPath);
  } else {
    job = new NormalCompactJob(request.getStore(), request, writtenPath);
  }
  CompactJobQueue.getInstance().addJob(job);
  return job;
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 20, Source file: Compactor.java

Example 2: RebuildCompactJob

import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the package/class this method depends on
public RebuildCompactJob(HStore store, CompactionRequest request, Path writtenPath)
    throws IOException {
  super(store, request, writtenPath);
  StringBuilder sb = new StringBuilder();
  sb.append("RebuildCompactJob construction, hdfsPath: ").append(tmpHDFSPath);
  sb.append(", with ").append(request.getFiles().size())
      .append(" store files compacted, they are: ");
  for (StoreFile sf : request.getFiles()) {
    sb.append(sf.getPath()).append(", ");
  }
  printMessage(sb.toString());
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source file: CompactJobQueue.java

Example 3: compactEquals

import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the package/class this method depends on
void compactEquals(List<StoreFile> candidates, boolean forcemajor, boolean isOffPeak,
    long... expected) throws IOException {
  store.forceMajor = forcemajor;
  // Test default compactions
  CompactionRequest result = ((RatioBasedCompactionPolicy)store.storeEngine.getCompactionPolicy())
      .selectCompaction(candidates, new ArrayList<StoreFile>(), false, isOffPeak, forcemajor);
  List<StoreFile> actual = new ArrayList<StoreFile>(result.getFiles());
  if (isOffPeak && !forcemajor) {
    assertTrue(result.isOffPeak());
  }
  assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
  store.forceMajor = false;
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source file: TestDefaultCompactSelection.java

Example 4: NormalCompactJob

import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the package/class this method depends on
public NormalCompactJob(HStore store, CompactionRequest request, Path writtenPath)
    throws IOException {
  super(store, request, writtenPath);
  printMessage("NormalCompactJob construction, hdfsPath: " + tmpHDFSPath);
  relatedJobs = new ArrayList<>();
  iFileMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  missingPathMap = new TreeMap<>();
  indexParameters = store.getLCIndexParameters();
  // LC_Home/[tableName]/[regionId]/.tmp/[HFileId].lcindex
  for (Map.Entry<byte[], TreeSet<byte[]>> entry : store.indexTableRelation.getIndexFamilyMap()
      .entrySet()) {
    for (byte[] qualifier : entry.getValue()) {
      List<Path> iFilesToBeCompacted = new ArrayList<>();
      for (StoreFile compactedHFile : request.getFiles()) {
        // if the column family does not match, skip this HFile
        if (!compactedHFile.getPath().getParent().getName()
            .equals(Bytes.toString(entry.getKey()))) {
          continue;
        }
        Path iFile = indexParameters.getLocalIFile(compactedHFile.getPath(), qualifier);
        iFilesToBeCompacted.add(iFile);
        LOG.info(
            "LCDBG, minor compact need iFile " + iFile.toString() + " for qualifier " + Bytes
                .toString(qualifier) + ", target: " + tmpHDFSPath);
        if (!indexParameters.getLCIndexFileSystem().exists(iFile)) {
          printMessage("finding the iFile to be compacted: " + iFile);
          // when missing, first find it in commit job, then in remote job
          BasicJob job =
              CommitJobQueue.getInstance().findJobByDestPath(compactedHFile.getPath());
          if (job == null) {
            job = CompleteCompactionJobQueue.getInstance()
                .findJobByDestPath(compactedHFile.getPath());
            if (job == null) {
              job = new RemoteJob(store, iFile, compactedHFile.getPath(), false);
              RemoteJobQueue.getInstance().addJob(job);
            }
          }
          printMessage("LCDBG, missing file " + iFile + " on HFile " + compactedHFile.getPath()
              + " relies on job: " + job.getClass().getName());
          missingPathMap.put(iFile, job);
        }
      }
      iFileMap.put(qualifier, iFilesToBeCompacted);
      printMessage(
          "LCDBG, minor compact on qualifier " + Bytes.toString(qualifier) + " with iFile.size="
              + iFilesToBeCompacted.size() + ", target: " + tmpHDFSPath);
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 50, Source file: CompactJobQueue.java

Example 5: NormalCompactJob

import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the package/class this method depends on
public NormalCompactJob(Store store, CompactionRequest request, Path writtenPath)
    throws IOException {
  super(store, request, writtenPath);
  if (printForDebug) {
    System.out.println("winter NormalCompactJob construction, hdfsPath: " + tmpHDFSPath);
  }
  relatedJobs = new ArrayList<CommitJob>();
  allIdxPaths = new TreeMap<Path, List<Path>>();
  missPathJob = new TreeMap<Path, BasicJob>();
  destLCIndexDir = store.getLocalBaseDirByFileName(writtenPath);
  List<Path> lcQualifierDirs = getLCQualifierDirs();
  for (Path lcQualifierDir : lcQualifierDirs) {
    // lcQualifierDir = /hbase/lcc/AAA/f/.lccindex/Q1-...-Q4
    List<Path> idxPaths = new ArrayList<Path>();
    for (StoreFile rawCompactedFile : request.getFiles()) {
      // rawCompactedFile = lcc/AAA/f/BBB, AAA is always the same
      // lcIdxFile = lcc/AAA/f/.lccindex/qualifier/BBB. In this loop, Bi is changing
      Path lcIdxFile = new Path(lcQualifierDir, rawCompactedFile.getPath().getName());
      idxPaths.add(lcIdxFile);
      if (!store.localfs.exists(lcIdxFile)) {
        if (printForDebug) {
          System.out.println("winter finding the lcIdxFile to be compacted: " + lcIdxFile);
        }
        CommitJob commitJob =
            CommitJobQueue.getInstance().findJobByDestPath(rawCompactedFile.getPath());
        if (commitJob == null) {
          // not necessary to process now, just wait
          CompleteCompactionJob completeJob =
              CompleteCompactionJobQueue.getInstance().findJobByDestPath(
                rawCompactedFile.getPath());
          if (completeJob == null) {
            System.out
                .println("winter file not found locally, trying to find it on remote servers: "
                    + lcIdxFile + ", corresponding raw path: " + rawCompactedFile.getPath());
            RemoteJob job = new RemoteJob(store, lcIdxFile, rawCompactedFile.getPath(), false);
            RemoteJobQueue.getInstance().addJob(job);
            missPathJob.put(lcIdxFile, job);
          } else {
            System.out.println("winter CompactJob found file in complete job queue");
            missPathJob.put(lcIdxFile, completeJob);
          }
        } else {
          System.out.println("winter CompactJob found file in commit job queue");
          missPathJob.put(lcIdxFile, commitJob);
        }
      }
    }
    allIdxPaths.put(lcQualifierDir, idxPaths);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 51, Source file: CompactJobQueue.java

Example 6: testDeleteExpiredStoreFiles

import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the package/class this method depends on
@Test
public void testDeleteExpiredStoreFiles() throws Exception {
  int storeFileNum = 4;
  int ttl = 4;
  IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
  EnvironmentEdgeManagerTestHelper.injectEdge(edge);
  
  Configuration conf = HBaseConfiguration.create();
  // Enable the expired store file deletion
  conf.setBoolean("hbase.store.delete.expired.storefile", true);
  HColumnDescriptor hcd = new HColumnDescriptor(family);
  hcd.setTimeToLive(ttl);
  init(name.getMethodName(), conf, hcd);

  long sleepTime = this.store.getScanInfo().getTtl() / storeFileNum;
  long timeStamp;
  // There are 4 store files and the max time stamp difference among these
  // store files will be (this.store.ttl / storeFileNum)
  for (int i = 1; i <= storeFileNum; i++) {
    LOG.info("Adding some data for the store file #" + i);
    timeStamp = EnvironmentEdgeManager.currentTimeMillis();
    this.store.add(new KeyValue(row, family, qf1, timeStamp, (byte[]) null));
    this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null));
    this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null));
    flush(i);
    edge.incrementTime(sleepTime);
  }

  // Verify the total number of store files
  Assert.assertEquals(storeFileNum, this.store.getStorefiles().size());

  // Each compaction request will find one expired store file and delete it
  // by the compaction.
  for (int i = 1; i <= storeFileNum; i++) {
    // verify the expired store file.
    CompactionContext compaction = this.store.requestCompaction();
    CompactionRequest cr = compaction.getRequest();
    // The first selected file is the one that expired normally.
    // For every compaction after the first, the empty file produced by the
    // previous compaction is selected as well, hence Math.min(i, 2) below.
    List<StoreFile> files = new ArrayList<StoreFile>(cr.getFiles());
    Assert.assertEquals(Math.min(i, 2), cr.getFiles().size());
    for (int j = 0; j < files.size(); j++) {
      Assert.assertTrue(files.get(j).getReader().getMaxTimestamp() < (edge
          .currentTimeMillis() - this.store.getScanInfo().getTtl()));
    }
    // Verify that the expired store file is compacted to an empty store file.
    // Default compaction policy creates just one and only one compacted file.
    StoreFile compactedFile = this.store.compact(compaction).get(0);
    // It is an empty store file.
    Assert.assertEquals(0, compactedFile.getReader().getEntries());

    // Let the next store file expired.
    edge.incrementTime(sleepTime);
  }
}
 
Developer: tenggyut, Project: HIndex, Lines: 56, Source file: TestStore.java
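
For quick reference, the two settings Example 6 depends on can be reproduced in isolation. This is a minimal sketch rather than part of the original test: the class name ExpiredStoreFileConfigSketch and the family name "cf" are placeholders, while the configuration key and the HColumnDescriptor calls are taken directly from the example above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;

public final class ExpiredStoreFileConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Allow compactions to drop store files whose cells have all passed their TTL.
    conf.setBoolean("hbase.store.delete.expired.storefile", true);
    HColumnDescriptor hcd = new HColumnDescriptor("cf"); // "cf" is a placeholder family name
    hcd.setTimeToLive(4); // TTL in seconds, matching the test above
  }
}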


Note: The org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest.getFiles examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and the source code copyright belongs to the original authors. Please refer to the corresponding project's License before distributing or reusing the code, and do not reproduce this article without permission.