This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest. If you are unsure what CompactionRequest does or how to use it, the curated examples below should help.
The CompactionRequest class belongs to the org.apache.hadoop.hbase.regionserver.compactions package. Fifteen code examples of the class are shown below, drawn from open-source projects and ordered by popularity.
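Before diving in: across these examples, CompactionRequest appears in two roles, as the unit of work inside the region server's compaction scheduler, and as the request metadata handed to RegionObserver coprocessor hooks. A minimal coprocessor sketch of the second role (hypothetical class name; HBase 1.x-era hook signature assumed; only CompactionRequest accessors that also appear in the examples below are used):

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScanType;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

// Hypothetical observer that logs each compaction request and leaves the
// compaction itself untouched by returning the scanner unchanged.
public class LoggingCompactionObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(LoggingCompactionObserver.class);

  @Override
  public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
      Store store, InternalScanner scanner, ScanType scanType,
      CompactionRequest request) throws IOException {
    LOG.info("Compacting " + request.getFiles().size() + " file(s), major="
        + request.isMajor());
    return scanner; // do not wrap or replace the scanner
  }
}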
Example 1: requestCompactionInternal
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
private List<CompactionRequest> requestCompactionInternal(final Region r, final String why,
    int p, List<Pair<CompactionRequest, Store>> requests, boolean selectNow, User user)
    throws IOException {
  // not a special compaction request, so make our own list
  List<CompactionRequest> ret = null;
  if (requests == null) {
    ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
    for (Store s : r.getStores()) {
      CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow, user);
      if (selectNow) ret.add(cr);
    }
  } else {
    Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
    ret = new ArrayList<CompactionRequest>(requests.size());
    for (Pair<CompactionRequest, Store> pair : requests) {
      ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst(), user));
    }
  }
  return ret;
}
Example 2: selectCompaction
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
private CompactionContext selectCompaction(final Region r, final Store s,
    int priority, CompactionRequest request, User user) throws IOException {
  CompactionContext compaction = s.requestCompaction(priority, request, user);
  if (compaction == null) {
    if (LOG.isDebugEnabled() && r.getRegionInfo() != null) {
      LOG.debug("Not compacting " + r.getRegionInfo().getRegionNameAsString() +
          " because compaction request was cancelled");
    }
    return null;
  }
  assert compaction.hasSelection();
  if (priority != Store.NO_PRIORITY) {
    compaction.getRequest().setPriority(priority);
  }
  return compaction;
}
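Note the Store.NO_PRIORITY sentinel here: the priority on the selected CompactionRequest is only overwritten when the caller passed an explicit priority, so system-triggered compactions keep whatever priority the store itself computed during selection.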
Example 3: testCompactionEmptyHFile
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
public void testCompactionEmptyHFile() throws IOException {
  // Set TTL
  ScanInfo oldScanInfo = store.getScanInfo();
  ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getConfiguration(), oldScanInfo.getFamily(),
      oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600,
      oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
      oldScanInfo.getComparator());
  store.setScanInfo(newScanInfo);
  // Do not compact empty store file
  List<StoreFile> candidates = sfCreate(0);
  for (StoreFile file : candidates) {
    if (file instanceof MockStoreFile) {
      MockStoreFile mockFile = (MockStoreFile) file;
      mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
      mockFile.setEntries(0);
    }
  }
  // Test default compactions
  CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
      .getCompactionPolicy()).selectCompaction(candidates,
          new ArrayList<StoreFile>(), false, false, false);
  assertTrue(result.getFiles().size() == 0);
  store.setScanInfo(oldScanInfo);
}
Example 4: testNonUserMajorCompactionRequest
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
/**
 * Test for HBASE-5920: user-requested major compactions should always occur.
 */
@Test
public void testNonUserMajorCompactionRequest() throws Exception {
  Store store = r.getStore(COLUMN_FAMILY);
  createStoreFile(r);
  for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
    createStoreFile(r);
  }
  store.triggerMajorCompaction();
  CompactionRequest request = store.requestCompaction(Store.NO_PRIORITY, null).getRequest();
  assertNotNull("Expected to receive a compaction request", request);
  assertEquals(
      "System-requested major compaction should not occur if there are too many store files",
      false,
      request.isMajor());
}
Example 5: testUserMajorCompactionRequest
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
/**
 * Test for HBASE-5920
 */
@Test
public void testUserMajorCompactionRequest() throws IOException {
  Store store = r.getStore(COLUMN_FAMILY);
  createStoreFile(r);
  for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
    createStoreFile(r);
  }
  store.triggerMajorCompaction();
  CompactionRequest request = store.requestCompaction(Store.PRIORITY_USER, null).getRequest();
  assertNotNull("Expected to receive a compaction request", request);
  assertEquals(
      "User-requested major compaction should always occur, even if there are too many store files",
      true,
      request.isMajor());
}
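For context, the Store.PRIORITY_USER path exercised by these two tests is roughly the server-side counterpart of a client-issued major-compaction request. A minimal client-side sketch (hypothetical table name "t"; standard HBase 1.x client API):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MajorCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the cluster to major-compact table "t" (placeholder name); the
      // region server handles this as a user-priority compaction request.
      admin.majorCompact(TableName.valueOf("t"));
    }
  }
}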
Example 6: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
@Override
public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Store store,
    List<? extends KeyValueScanner> scanners, ScanType scanType, long earliestPutTs, InternalScanner s,
    CompactionRequest request)
    throws IOException {
  // Get the latest tx snapshot state for the compaction
  TransactionVisibilityState snapshot = cache.getLatestState();
  // Record tx state before the compaction
  if (compactionState != null) {
    compactionState.record(request, snapshot);
  }
  // Also make sure to use the same snapshot for the compaction
  return createStoreScanner(c.getEnvironment(), "compaction", snapshot, store, scanners, scanType, earliestPutTs);
}
Example 7: compactStoreFiles
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
/**
 * Execute the actual compaction job.
 * If the compact-once flag is not specified, execute compactions until
 * no more compactions are needed. Uses the Configuration settings provided.
 */
private void compactStoreFiles(final HRegion region, final Path familyDir,
    final boolean compactOnce, final boolean major) throws IOException {
  LOG.info("Compact table=" + region.getTableDesc().getNameAsString() +
      " region=" + region.getRegionNameAsString() +
      " family=" + familyDir.getName());
  Store store = getStore(region, familyDir);
  if (major) {
    store.triggerMajorCompaction();
  }
  do {
    CompactionRequest cr = store.requestCompaction(Store.PRIORITY_USER, null);
    StoreFile storeFile = store.compact(cr);
    if (storeFile != null) {
      if (keepCompactedFiles && deleteCompacted) {
        fs.delete(storeFile.getPath(), false);
      }
    }
  } while (store.needsCompaction() && !compactOnce);
}
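The do/while structure is the point of this example: each pass files a PRIORITY_USER request and compacts, and the loop repeats until store.needsCompaction() reports no further work, unless the compactOnce flag cuts it short after the first pass.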
Example 8: requestCompaction
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
@Override
public synchronized List<CompactionRequest> requestCompaction(final HRegion r, final String why,
    int pri, final List<CompactionRequest> requests) throws IOException {
  List<CompactionRequest> ret;
  // not a special compaction request, so make our own list
  if (requests == null) {
    ret = new ArrayList<CompactionRequest>(r.getStores().size());
    for (Store s : r.getStores().values()) {
      ret.add(requestCompaction(r, s, why, pri, null));
    }
  } else {
    ret = new ArrayList<CompactionRequest>(requests.size());
    for (CompactionRequest request : requests) {
      ret.add(requestCompaction(r, request.getStore(), why, pri, request));
    }
  }
  return ret;
}
Example 9: mWinterCompactLCCIndexLocal
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
CompactJob mWinterCompactLCCIndexLocal(final CompactionRequest request, final Path writtenPath)
    throws IOException {
  // check for reference files, which are not supported yet!
  boolean needToRebuild = false;
  for (StoreFile sf : request.getFiles()) {
    if (sf.getPath().getName().indexOf(".") != -1 || sf.isReference()) {
      needToRebuild = true;
      break;
    }
  }
  CompactJob job = null;
  if (needToRebuild) {
    job = new RebuildCompactJob(request.getStore(), request, writtenPath);
  } else {
    job = new NormalCompactJob(request.getStore(), request, writtenPath);
  }
  CompactJobQueue.getInstance().addJob(job);
  return job;
}
Example 10: preCompactScannerOpen
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
/**
 * See
 * {@link RegionObserver#preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner, CompactionRequest)}
 */
public InternalScanner preCompactScannerOpen(Store store, List<StoreFileScanner> scanners,
    ScanType scanType, long earliestPutTs, CompactionRequest request) throws IOException {
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  InternalScanner s = null;
  for (RegionEnvironment env : coprocessors) {
    if (env.getInstance() instanceof RegionObserver) {
      ctx = ObserverContext.createAndPrepare(env, ctx);
      try {
        s = ((RegionObserver) env.getInstance()).preCompactScannerOpen(ctx, store, scanners,
            scanType, earliestPutTs, s, request);
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      }
      if (ctx.shouldComplete()) {
        break;
      }
    }
  }
  return s;
}
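Examples 10 through 13 all follow this host-side dispatch pattern: walk the registered coprocessors in order, re-prepare the shared ObserverContext for each RegionObserver, route any Throwable through the coprocessor error handler, and stop early once an observer marks the context complete. Only the hook being invoked differs.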
Example 11: postCompactSelection
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
/**
 * Called after the {@link StoreFile}s to be compacted have been selected from the available
 * candidates.
 * @param store the store where compaction is being requested
 * @param selected the store files selected to compact
 * @param request the custom compaction request
 */
public void postCompactSelection(Store store, ImmutableList<StoreFile> selected,
    CompactionRequest request) {
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    if (env.getInstance() instanceof RegionObserver) {
      ctx = ObserverContext.createAndPrepare(env, ctx);
      try {
        ((RegionObserver) env.getInstance()).postCompactSelection(ctx, store, selected, request);
      } catch (Throwable e) {
        handleCoprocessorThrowableNoRethrow(env, e);
      }
      if (ctx.shouldComplete()) {
        break;
      }
    }
  }
}
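The observer side of this hook is the mirror image. A minimal sketch (hypothetical class name; HBase 1.x-era signature assumed) that only inspects the selection, which is immutable at this point:

import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;

// Hypothetical observer: records how many files were selected and whether
// the resulting compaction will be major.
public class SelectionLoggingObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(SelectionLoggingObserver.class);

  @Override
  public void postCompactSelection(ObserverContext<RegionCoprocessorEnvironment> c,
      Store store, ImmutableList<StoreFile> selected, CompactionRequest request) {
    LOG.info(selected.size() + " file(s) selected, major=" + request.isMajor());
  }
}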
Example 12: preCompact
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
/**
 * Called prior to rewriting the store files selected for compaction
 * @param store the store being compacted
 * @param scanner the scanner used to read store data during compaction
 * @param request the compaction that will be executed
 * @throws IOException
 */
public InternalScanner preCompact(Store store, InternalScanner scanner,
    CompactionRequest request) throws IOException {
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  boolean bypass = false;
  for (RegionEnvironment env : coprocessors) {
    if (env.getInstance() instanceof RegionObserver) {
      ctx = ObserverContext.createAndPrepare(env, ctx);
      try {
        scanner = ((RegionObserver) env.getInstance()).preCompact(ctx, store, scanner, request);
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      }
      bypass |= ctx.shouldBypass();
      if (ctx.shouldComplete()) {
        break;
      }
    }
  }
  return bypass ? null : scanner;
}
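Two details worth noting: each observer may wrap or replace the scanner it is handed, and the bypass flags are OR-ed across all observers, so if any observer requests a bypass the host returns null; in HBase's compaction path a null scanner from these hooks is treated as a signal to skip the normal compaction processing.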
Example 13: postCompact
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
/**
 * Called after the store compaction has completed.
 * @param store the store being compacted
 * @param resultFile the new store file written during compaction
 * @param request the compaction that is being executed
 * @throws IOException
 */
public void postCompact(Store store, StoreFile resultFile, CompactionRequest request)
    throws IOException {
  ObserverContext<RegionCoprocessorEnvironment> ctx = null;
  for (RegionEnvironment env : coprocessors) {
    if (env.getInstance() instanceof RegionObserver) {
      ctx = ObserverContext.createAndPrepare(env, ctx);
      try {
        ((RegionObserver) env.getInstance()).postCompact(ctx, store, resultFile, request);
      } catch (Throwable e) {
        handleCoprocessorThrowable(env, e);
      }
      if (ctx.shouldComplete()) {
        break;
      }
    }
  }
}
Example 14: requestCompactionInternal
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
private List<CompactionRequest> requestCompactionInternal(final HRegion r, final String why,
    int p, List<Pair<CompactionRequest, Store>> requests, boolean selectNow) throws IOException {
  // not a special compaction request, so make our own list
  List<CompactionRequest> ret = null;
  if (requests == null) {
    ret = selectNow ? new ArrayList<CompactionRequest>(r.getStores().size()) : null;
    for (Store s : r.getStores().values()) {
      CompactionRequest cr = requestCompactionInternal(r, s, why, p, null, selectNow);
      if (selectNow) ret.add(cr);
    }
  } else {
    Preconditions.checkArgument(selectNow); // only system requests have selectNow == false
    ret = new ArrayList<CompactionRequest>(requests.size());
    for (Pair<CompactionRequest, Store> pair : requests) {
      ret.add(requestCompaction(r, pair.getSecond(), why, p, pair.getFirst()));
    }
  }
  return ret;
}
Example 15: testCompactionEmptyHFile
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; // import the required package/class
public void testCompactionEmptyHFile() throws IOException {
  // Set TTL
  ScanInfo oldScanInfo = store.getScanInfo();
  ScanInfo newScanInfo = new ScanInfo(oldScanInfo.getFamily(),
      oldScanInfo.getMinVersions(), oldScanInfo.getMaxVersions(), 600,
      oldScanInfo.getKeepDeletedCells(), oldScanInfo.getTimeToPurgeDeletes(),
      oldScanInfo.getComparator());
  store.setScanInfo(newScanInfo);
  // Do not compact empty store file
  List<StoreFile> candidates = sfCreate(0);
  for (StoreFile file : candidates) {
    if (file instanceof MockStoreFile) {
      MockStoreFile mockFile = (MockStoreFile) file;
      mockFile.setTimeRangeTracker(new TimeRangeTracker(-1, -1));
      mockFile.setEntries(0);
    }
  }
  // Test default compactions
  CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
      .getCompactionPolicy()).selectCompaction(candidates,
          new ArrayList<StoreFile>(), false, false, false);
  assertTrue(result.getFiles().size() == 0);
  store.setScanInfo(oldScanInfo);
}