This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost. If you are unsure what RegionCoprocessorHost is for or how to use it, the curated class code examples below may help.
The RegionCoprocessorHost class belongs to the org.apache.hadoop.hbase.regionserver package. A total of 15 code examples of the class are shown below, sorted by popularity by default.
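All of the snippets below load a caller-supplied RegionObserver implementation (implClass) into the host, but none of them show what such a class looks like. For reference, a minimal, hypothetical observer (a sketch assuming the HBase 1.x BaseRegionObserver API used throughout these examples, not a class taken from them) might look like this:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical observer for illustration only: it just counts preGetOp invocations.
public class CountingRegionObserver extends BaseRegionObserver {
  private final AtomicLong gets = new AtomicLong();

  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e,
      Get get, List<Cell> results) throws IOException {
    gets.incrementAndGet();
  }

  public long getGetCount() {
    return gets.get();
  }
}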
Example 1: reopenRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
    throws IOException {
  //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Region r = HRegion.openHRegion(closedRegion, null);
  // The following piece is a hack. Currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. We don't really
  // start a region server here, so just manually create the cphost
  // and set it on the region.
  Configuration conf = TEST_UTIL.getConfiguration();
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  ((HRegion)r).setCoprocessorHost(host);
  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // We need to manually call pre- and postOpen here since the
  // above load() is not the real case for CP loading. A CP is
  // expected to be loaded by default from 1) configuration or 2)
  // HTableDescriptor. If it is loaded after the HRegion is initialized,
  // pre- and postOpen() won't be triggered automatically, so we have
  // to call preOpen and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
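A hypothetical call site for the helper above, assuming a closedRegion produced by earlier test setup and the illustrative CountingRegionObserver sketched near the top of this page (neither is part of the original example):

// Hypothetical usage: reopen a previously closed region with an extra observer attached.
Region reopened = reopenRegion(closedRegion, CountingRegionObserver.class);
RegionCoprocessorHost reopenedHost = reopened.getCoprocessorHost();
assertNotNull(reopenedHost.findCoprocessor(CountingRegionObserver.class.getName()));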
Example 2: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
Region initHRegion(TableName tableName, String callingMethod,
    Configuration conf, Class<?>[] implClasses, byte[][] families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // The following piece is a hack.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
    Coprocessor c = host.findCoprocessor(implClass.getName());
    assertNotNull(c);
  }
  // Here we have to call preOpen and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
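A hypothetical way this helper might be exercised in a test. The table name, family, and observer class below are illustrative assumptions, and TEST_UTIL is the testing utility already referenced in Example 1:

// Hypothetical usage: create a region with one family and one observer, then write to it.
byte[] FAMILY = Bytes.toBytes("f");
Region region = initHRegion(TableName.valueOf("testInitTable"), "testInitHRegion",
    TEST_UTIL.getConfiguration(),
    new Class<?>[] { CountingRegionObserver.class },
    new byte[][] { FAMILY });
Put put = new Put(Bytes.toBytes("row1"));
put.add(FAMILY, FAMILY, Bytes.toBytes("value1"));
region.put(put);
((HRegion) region).close();  // release the region when the test is done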
Example 3: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
HRegion initHRegion(byte[] tableName, String callingMethod,
    Configuration conf, byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // The following piece is a hack. Currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. We don't really
  // start a region server here, so just manually create the cphost
  // and set it on the region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  return r;
}
Example 4: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
Region initHRegion(byte[] tableName, String callingMethod, Configuration conf,
    byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // The following piece is a hack. Currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. We don't really
  // start a region server here, so just manually create the cphost
  // and set it on the region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  return r;
}
Example 5: testRegionObserverScanTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.listCells());
}
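Neither NoDataFromScan nor EmptyRegionObsever is reproduced on this page. As a rough, hypothetical stand-in for the "no data on read" behavior (not the actual HBase test class, which intercepts the store scanner), an observer could simply bypass every Get:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical illustration only: leave "results" empty and skip the rest of the
// get pipeline, so callers see no cells for any row.
public class BypassGetsObserver extends BaseRegionObserver {
  @Override
  public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e,
      Get get, List<Cell> results) throws IOException {
    e.bypass();
  }
}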
Example 6: testRegionObserverFlushTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.listCells());
}
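NoDataFromFlush is likewise not shown here. A plausible sketch of a flush-time observer that drops every cell, assuming the HBase 1.x preFlush hook and InternalScanner interface (an illustration, not the real test class):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.ScannerContext;
import org.apache.hadoop.hbase.regionserver.Store;

// Hypothetical sketch: hand the flush an InternalScanner that reports no cells,
// so nothing from the memstore is persisted.
public class DropAllOnFlushObserver extends BaseRegionObserver {
  @Override
  public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> e,
      Store store, final InternalScanner scanner) throws IOException {
    return new InternalScanner() {
      @Override
      public boolean next(List<Cell> results) throws IOException {
        return false; // no cells and no more rows
      }

      @Override
      public boolean next(List<Cell> results, ScannerContext scannerContext)
          throws IOException {
        return false;
      }

      @Override
      public void close() throws IOException {
        scanner.close();
      }
    };
  }
}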
Example 7: testRegionCoprocessorHostDefaults
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
@Test
public void testRegionCoprocessorHostDefaults() throws Exception {
  Configuration conf = new Configuration(CONF);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertEquals("System coprocessors loading default was not honored",
      systemCoprocessorLoaded.get(),
      CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED);
  assertEquals("Table coprocessors loading default was not honored",
      tableCoprocessorLoaded.get(),
      CoprocessorHost.DEFAULT_COPROCESSORS_ENABLED &&
      CoprocessorHost.DEFAULT_USER_COPROCESSORS_ENABLED);
}
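Examples 7 through 9 all revolve around two boolean switches on CoprocessorHost. A minimal sketch of flipping them on a plain Configuration before building the host, mirroring what the tests assert (region and rsServices are assumed to be set up as in the examples):

// Minimal sketch: keep system coprocessors on, but block table/user coprocessors.
Configuration conf = HBaseConfiguration.create();
conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true);        // master switch
conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, false);  // table-level CPs only
RegionCoprocessorHost host = new RegionCoprocessorHost(region, rsServices, conf);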
Example 8: testRegionCoprocessorHostAllDisabled
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
@Test
public void testRegionCoprocessorHostAllDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertFalse("System coprocessors should not have been loaded",
      systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
      tableCoprocessorLoaded.get());
}
Example 9: testRegionCoprocessorHostTableLoadingDisabled
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
@Test
public void testRegionCoprocessorHostTableLoadingDisabled() throws Exception {
  Configuration conf = new Configuration(CONF);
  conf.setBoolean(CoprocessorHost.COPROCESSORS_ENABLED_CONF_KEY, true); // in case the defaults change
  conf.setBoolean(CoprocessorHost.USER_COPROCESSORS_ENABLED_CONF_KEY, false);
  HRegion region = mock(HRegion.class);
  when(region.getRegionInfo()).thenReturn(REGIONINFO);
  when(region.getTableDesc()).thenReturn(TABLEDESC);
  RegionServerServices rsServices = mock(RegionServerServices.class);
  systemCoprocessorLoaded.set(false);
  tableCoprocessorLoaded.set(false);
  new RegionCoprocessorHost(region, rsServices, conf);
  assertTrue("System coprocessors should have been loaded",
      systemCoprocessorLoaded.get());
  assertFalse("Table coprocessors should not have been loaded",
      tableCoprocessorLoaded.get());
}
Example 10: verifyMethodResult
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
private void verifyMethodResult(Class c, String methodName[], byte[] tableName,
    Object value[]) throws IOException {
  try {
    for (JVMClusterUtil.RegionServerThread t : cluster.getRegionServerThreads()) {
      for (HRegionInfo r : t.getRegionServer().getOnlineRegions()) {
        if (!Arrays.equals(r.getTableName(), tableName)) {
          continue;
        }
        RegionCoprocessorHost cph =
            t.getRegionServer().getOnlineRegion(r.getRegionName()).getCoprocessorHost();
        Coprocessor cp = cph.findCoprocessor(c.getName());
        assertNotNull(cp);
        for (int i = 0; i < methodName.length; ++i) {
          Method m = c.getMethod(methodName[i]);
          Object o = m.invoke(cp);
          assertTrue("Result of " + c.getName() + "." + methodName[i]
              + " is expected to be " + value[i].toString()
              + ", while we get " + o.toString(), o.equals(value[i]));
        }
      }
    }
  } catch (Exception e) {
    throw new IOException(e.toString());
  }
}
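A hypothetical invocation of this checker, assuming a coprocessor class with no-argument getters (the class, method name, table name, and expected value below are illustrative, not from the original test):

// Hypothetical usage: every online region of "myTable" should host a
// CountingRegionObserver whose getGetCount() still returns 0.
verifyMethodResult(CountingRegionObserver.class,
    new String[] { "getGetCount" },
    Bytes.toBytes("myTable"),
    new Object[] { 0L });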
Example 11: reopenRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
    throws IOException {
  //HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  HRegion r = new HRegion(closedRegion);
  r.initialize();
  // The following piece is a hack. Currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. We don't really
  // start a region server here, so just manually create the cphost
  // and set it on the region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // We need to manually call pre- and postOpen here since the
  // above load() is not the real case for CP loading. A CP is
  // expected to be loaded by default from 1) configuration or 2)
  // HTableDescriptor. If it is loaded after the HRegion is initialized,
  // pre- and postOpen() won't be triggered automatically, so we have
  // to call preOpen and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
Example 12: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
HRegion initHRegion(byte[] tableName, String callingMethod,
    Configuration conf, Class<?>[] implClasses, byte[][] families)
    throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // The following piece is a hack.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  for (Class<?> implClass : implClasses) {
    host.load(implClass, Coprocessor.PRIORITY_USER, conf);
    Coprocessor c = host.findCoprocessor(implClass.getName());
    assertNotNull(c);
  }
  // Here we have to call preOpen and postOpen explicitly.
  host.preOpen();
  host.postOpen();
  return r;
}
Example 13: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
HRegion initHRegion(byte[] tableName, String callingMethod,
    Configuration conf, byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // The following piece is a hack. Currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. We don't really
  // start a region server here, so just manually create the cphost
  // and set it on the region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  return r;
}
Example 14: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
HRegion initHRegion(byte[] tableName, String callingMethod, Configuration conf,
    byte[]... families) throws IOException {
  HTableDescriptor htd = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    htd.addFamily(new HColumnDescriptor(family));
  }
  HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
  Path path = new Path(DIR + callingMethod);
  HRegion r = HRegion.createHRegion(info, path, conf, htd);
  // The following piece is a hack. Currently a coprocessorHost
  // is secretly loaded at OpenRegionHandler. We don't really
  // start a region server here, so just manually create the cphost
  // and set it on the region.
  RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf);
  r.setCoprocessorHost(host);
  return r;
}
Example 15: testRegionObserverScanTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; // import the required package/class
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
          + r, r.list());
}