This article collects typical usage examples of the Java class org.apache.hadoop.hbase.coprocessor.RegionObserver. If you are unsure what RegionObserver does, how to use it, or what working code looks like, the curated class examples below should help.
The RegionObserver class belongs to the org.apache.hadoop.hbase.coprocessor package. A total of 15 code examples of the class are shown below, ordered by popularity.
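Before the host-side examples, here is a minimal sketch of what a user-defined RegionObserver looks like on the coprocessor side. It is an illustration only: the class name ExampleRegionObserver and its counter are invented here, and it assumes the HBase 1.x API, where observers usually extend BaseRegionObserver and override just the hooks they need. The host code in the examples below is what dispatches these callbacks.
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical observer: counts how many times its region has been opened.
public class ExampleRegionObserver extends BaseRegionObserver {

  private final AtomicLong opens = new AtomicLong();

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> ctx) {
    // Reached from the host's postOpen() loop shown in Example 6.
    opens.incrementAndGet();
  }
}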
Example 1: postClose
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* Invoked after a region is closed
* @param abortRequested true if the server is aborting
*/
public void postClose(final boolean abortRequested) {
try {
execOperation(false, new RegionOperation() {
@Override
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
oserver.postClose(ctx, abortRequested);
}
@Override
public void postEnvCall(RegionEnvironment env) {
shutdown(env);
}
});
} catch (IOException e) {
LOG.warn(e);
}
}
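The method above is the host-side dispatcher. On the coprocessor side, the matching hook could look like the following sketch (the class name and the executor it shuts down are invented for illustration; the postClose signature is the HBase 1.x one invoked by the dispatch above).
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class ClosingObserver extends BaseRegionObserver {

  // Hypothetical per-observer resource that must be released when the region closes.
  private final ExecutorService pool = Executors.newSingleThreadExecutor();

  @Override
  public void postClose(ObserverContext<RegionCoprocessorEnvironment> ctx, boolean abortRequested) {
    // abortRequested is true when the region server is aborting rather than closing cleanly.
    pool.shutdownNow();
  }
}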
Example 2: preWALRestore
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
 * @param info the region whose WAL edits are being replayed
 * @param logKey the key of the WAL entry being replayed
 * @param logEdit the WAL edit being replayed
 * @return true if default behavior should be bypassed, false otherwise
 * @throws IOException
 */
public boolean preWALRestore(final HRegionInfo info, final WALKey logKey,
final WALEdit logEdit) throws IOException {
return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
@Override
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
// Once we don't need to support the legacy call, replace RegionOperation with a version
// that's ObserverContext<RegionEnvironment> and avoid this cast.
final RegionEnvironment env = (RegionEnvironment)ctx.getEnvironment();
if (env.useLegacyPre) {
if (logKey instanceof HLogKey) {
oserver.preWALRestore(ctx, info, (HLogKey)logKey, logEdit);
} else {
legacyWarning(oserver.getClass(), "There are wal keys present that are not HLogKey.");
}
} else {
oserver.preWALRestore(ctx, info, logKey, logEdit);
}
}
});
}
Example 3: postWALRestore
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
 * @param info the region whose WAL edits are being replayed
 * @param logKey the key of the WAL entry being replayed
 * @param logEdit the WAL edit being replayed
 * @throws IOException
 */
public void postWALRestore(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
@Override
public void call(RegionObserver oserver, ObserverContext<RegionCoprocessorEnvironment> ctx)
throws IOException {
// Once we don't need to support the legacy call, replace RegionOperation with a version
// that's ObserverContext<RegionEnvironment> and avoid this cast.
final RegionEnvironment env = (RegionEnvironment)ctx.getEnvironment();
if (env.useLegacyPost) {
if (logKey instanceof HLogKey) {
oserver.postWALRestore(ctx, info, (HLogKey)logKey, logEdit);
} else {
legacyWarning(oserver.getClass(), "There are wal keys present that are not HLogKey.");
}
} else {
oserver.postWALRestore(ctx, info, logKey, logEdit);
}
}
});
}
Example 4: setupOnce
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
static void setupOnce() throws Exception {
// Use our load balancer to control region plans
conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
MyLoadBalancer.class, LoadBalancer.class);
conf.setClass(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
MyRegionObserver.class, RegionObserver.class);
// Reduce the maximum attempts to speed up the test
conf.setInt("hbase.assignment.maximum.attempts", 3);
// Put meta on master to avoid meta server shutdown handling
conf.set("hbase.balancer.tablesOnMaster", "hbase:meta");
conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);
TEST_UTIL.startMiniCluster(1, 4, null, MyMaster.class, MyRegionServer.class);
admin = TEST_UTIL.getHBaseAdmin();
}
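The test setup above registers MyRegionObserver for every region through the CoprocessorHost.REGION_COPROCESSOR_CONF_KEY configuration key. As a rough alternative sketch, assuming an HBase 1.x client, the same observer could be attached to a single table through its descriptor (the table and family names here are made up):
import java.io.IOException;

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Hypothetical helper: registers the observer on one table via its descriptor.
static void createTableWithObserver(Admin admin) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
  htd.addFamily(new HColumnDescriptor("f"));
  // addCoprocessor takes the fully qualified class name; the class must be on the region server classpath.
  htd.addCoprocessor(MyRegionObserver.class.getName());
  admin.createTable(htd);
}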
Example 5: preOpen
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* Invoked before a region open
*/
public void preOpen(){
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env: coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((RegionObserver)env.getInstance()).preOpen(ctx);
} catch (Throwable e) {
handleCoprocessorThrowableNoRethrow(env, e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
Example 6: postOpen
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* Invoked after a region open
*/
public void postOpen(){
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env: coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((RegionObserver)env.getInstance()).postOpen(ctx);
} catch (Throwable e) {
handleCoprocessorThrowableNoRethrow(env, e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
Example 7: postBatchMutate
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
 * @param miniBatchOp the batch of mutations that has just been applied
 * @throws IOException
 */
public void postBatchMutate(
final MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
((RegionObserver) env.getInstance()).postBatchMutate(ctx, miniBatchOp);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
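For completeness, a matching observer-side hook might look like the sketch below (class name and counter are invented; it assumes the HBase 1.x postBatchMutate signature that the dispatch above calls, and that MiniBatchOperationInProgress exposes the batch through size() and getOperation(int)).
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;

public class BatchCountingObserver extends BaseRegionObserver {

  private final AtomicLong mutationsApplied = new AtomicLong();

  @Override
  public void postBatchMutate(ObserverContext<RegionCoprocessorEnvironment> ctx,
      MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    // Called after the batch of mutations has been applied to the region.
    for (int i = 0; i < miniBatchOp.size(); i++) {
      Mutation m = miniBatchOp.getOperation(i); // the Put/Delete at position i
      if (m != null) {
        mutationsApplied.incrementAndGet();
      }
    }
  }
}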
Example 8: preCompactScannerOpen
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* See
* {@link RegionObserver#preCompactScannerOpen(ObserverContext, Store, List, ScanType, long, InternalScanner, CompactionRequest)}
*/
public InternalScanner preCompactScannerOpen(Store store, List<StoreFileScanner> scanners,
ScanType scanType, long earliestPutTs, CompactionRequest request) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
InternalScanner s = null;
for (RegionEnvironment env: coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
s = ((RegionObserver) env.getInstance()).preCompactScannerOpen(ctx, store, scanners,
scanType, earliestPutTs, s, request);
} catch (Throwable e) {
handleCoprocessorThrowable(env,e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
return s;
}
Example 9: preRollBackSplit
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* Invoked just before the rollback of a failed split is started
* @throws IOException
*/
public void preRollBackSplit() throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
((RegionObserver) env.getInstance()).preRollBackSplit(ctx);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
Example 10: postGetClosestRowBefore
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* @param row the row key
* @param family the family
* @param result the result set from the region
* @exception IOException Exception
*/
public void postGetClosestRowBefore(final byte[] row, final byte[] family,
final Result result) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env: coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
((RegionObserver)env.getInstance()).postGetClosestRowBefore(ctx, row, family, result);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
Example 11: preFlush
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
 * Invoked before a memstore flush
 * @return the scanner to use during the flush, or null if an observer requested a bypass
 * @throws IOException
 */
public InternalScanner preFlush(Store store, InternalScanner scanner) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
boolean bypass = false;
for (RegionEnvironment env: coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
scanner = ((RegionObserver)env.getInstance()).preFlush(
ctx, store, scanner);
} catch (Throwable e) {
handleCoprocessorThrowable(env,e);
}
bypass |= ctx.shouldBypass();
if (ctx.shouldComplete()) {
break;
}
}
}
return bypass ? null : scanner;
}
Example 12: postRollBackSplit
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* Invoked just after the rollback of a failed split is done
* @throws IOException
*/
public void postRollBackSplit() throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
((RegionObserver) env.getInstance()).postRollBackSplit(ctx);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
Example 13: preFlushScannerOpen
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
 * See
 * {@link RegionObserver#preFlushScannerOpen(ObserverContext, Store, KeyValueScanner, InternalScanner)}
 */
public InternalScanner preFlushScannerOpen(Store store, KeyValueScanner memstoreScanner) throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
InternalScanner s = null;
for (RegionEnvironment env : coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
s = ((RegionObserver) env.getInstance()).preFlushScannerOpen(ctx, store, memstoreScanner, s);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
return s;
}
Example 14: postClose
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* Invoked after a region is closed
* @param abortRequested true if the server is aborting
*/
public void postClose(final boolean abortRequested) {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env: coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
Thread currentThread = Thread.currentThread();
ClassLoader cl = currentThread.getContextClassLoader();
try {
currentThread.setContextClassLoader(env.getClassLoader());
((RegionObserver) env.getInstance()).postClose(ctx, abortRequested);
} catch (Throwable e) {
handleCoprocessorThrowableNoRethrow(env, e);
} finally {
currentThread.setContextClassLoader(cl);
}
}
shutdown(env);
}
}
Example 15: preSplit
import org.apache.hadoop.hbase.coprocessor.RegionObserver; // import the required package/class
/**
* Invoked just before a split
* @throws IOException
*/
public void preSplit() throws IOException {
ObserverContext<RegionCoprocessorEnvironment> ctx = null;
for (RegionEnvironment env: coprocessors) {
if (env.getInstance() instanceof RegionObserver) {
ctx = ObserverContext.createAndPrepare(env, ctx);
try {
((RegionObserver)env.getInstance()).preSplit(ctx);
} catch (Throwable e) {
handleCoprocessorThrowable(env, e);
}
if (ctx.shouldComplete()) {
break;
}
}
}
}
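On the coprocessor side, the matching hook is parameterless in this HBase version. The sketch below (class name invented) shows an observer that short-circuits the coprocessor chain; the host loop above honours this through its shouldComplete() check.
import java.io.IOException;

import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

public class SplitAuditObserver extends BaseRegionObserver {

  @Override
  public void preSplit(ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
    // Invoked just before a split starts, from the host's preSplit() loop above.
    // complete() makes the host's shouldComplete() check stop consulting lower-priority observers.
    ctx.complete();
  }
}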