本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.load方法的典型用法代码示例。如果您正苦于以下问题:Java RegionCoprocessorHost.load方法的具体用法?Java RegionCoprocessorHost.load怎么用?Java RegionCoprocessorHost.load使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost
的用法示例。
在下文中一共展示了RegionCoprocessorHost.load方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: reopenRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Reopens the given closed region and manually attaches a coprocessor host
 * loaded with the supplied coprocessor implementations.
 *
 * In a real deployment the host is created by OpenRegionHandler; since no
 * region server runs in this test, it is built and wired by hand, and
 * preOpen()/postOpen() are invoked explicitly because load() does not
 * trigger the normal open lifecycle callbacks.
 */
Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
throws IOException {
  final Region reopened = HRegion.openHRegion(closedRegion, null);
  final Configuration conf = TEST_UTIL.getConfiguration();
  final RegionCoprocessorHost cpHost = new RegionCoprocessorHost(reopened, null, conf);
  // setCoprocessorHost lives on HRegion, not on the Region interface.
  ((HRegion) reopened).setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // load() bypasses the standard CP loading path (configuration or
  // HTableDescriptor), so the open hooks must be fired manually.
  cpHost.preOpen();
  cpHost.postOpen();
  return reopened;
}
示例2: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Creates a fresh HRegion for the given table and column families, attaches
 * a hand-built coprocessor host loaded with the supplied implementations,
 * asserts each coprocessor registered, and fires the open hooks manually.
 */
Region initHRegion (TableName tableName, String callingMethod,
Configuration conf, Class<?> [] implClasses, byte [][] families)
throws IOException {
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    descriptor.addFamily(new HColumnDescriptor(family));
  }
  HRegion region = HRegion.createHRegion(
      new HRegionInfo(tableName, null, null, false),
      new Path(DIR + callingMethod), conf, descriptor);
  // Hack: OpenRegionHandler would normally install this host.
  RegionCoprocessorHost cpHost = new RegionCoprocessorHost(region, null, conf);
  region.setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load(implClass, Coprocessor.PRIORITY_USER, conf);
    assertNotNull(cpHost.findCoprocessor(implClass.getName()));
  }
  // load() skips the normal open lifecycle, so invoke the hooks explicitly.
  cpHost.preOpen();
  cpHost.postOpen();
  return region;
}
示例3: testRegionObserverScanTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Verifies scan-time coprocessor stacking: NoDataFromScan (highest priority)
 * strips all cells, so a Get through the stacked observers returns no data.
 */
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] table = Bytes.toBytes(getClass().getName());
  final byte[] family = Bytes.toBytes("A");
  final byte[][] families = new byte[][] { family };
  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(table, getClass().getName(), conf, families);
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  Put put = new Put(row);
  put.add(family, family, family);
  region.put(put);
  Result result = region.get(new Get(row));
  assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
      + result, result.listCells());
}
示例4: testRegionObserverFlushTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Verifies flush-time coprocessor stacking: NoDataFromFlush (highest
 * priority) drops every cell during flush, so after flushing the put no
 * data is readable via a Get.
 */
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  // Fixed copy-paste defect: the message previously named NoDataFromScan,
  // but this test loads NoDataFromFlush.
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.listCells());
}
示例5: reopenRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Reconstructs and re-initializes the closed region, then manually attaches
 * a coprocessor host loaded with the supplied coprocessor classes.
 *
 * OpenRegionHandler would normally install the host; no region server runs
 * here, so it is created by hand and the open hooks are fired explicitly,
 * because load() is not the normal CP loading path (configuration or
 * HTableDescriptor).
 */
HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
throws IOException {
  HRegion reopened = new HRegion(closedRegion);
  reopened.initialize();
  RegionCoprocessorHost cpHost = new RegionCoprocessorHost(reopened, null, conf);
  reopened.setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // Fire the open lifecycle callbacks that load() does not trigger.
  cpHost.preOpen();
  cpHost.postOpen();
  return reopened;
}
示例6: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Creates an HRegion for the given table name and column families, attaches
 * a hand-built coprocessor host, loads the supplied coprocessor classes and
 * asserts each one registered before firing the open hooks explicitly.
 */
HRegion initHRegion (byte [] tableName, String callingMethod,
Configuration conf, Class<?> [] implClasses, byte [][] families)
throws IOException {
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    descriptor.addFamily(new HColumnDescriptor(family));
  }
  HRegion region = HRegion.createHRegion(
      new HRegionInfo(tableName, null, null, false),
      new Path(DIR + callingMethod), conf, descriptor);
  // Hack: OpenRegionHandler would normally install this host.
  RegionCoprocessorHost cpHost = new RegionCoprocessorHost(region, null, conf);
  region.setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load(implClass, Coprocessor.PRIORITY_USER, conf);
    assertNotNull(cpHost.findCoprocessor(implClass.getName()));
  }
  // load() bypasses the normal open lifecycle; invoke the hooks explicitly.
  cpHost.preOpen();
  cpHost.postOpen();
  return region;
}
示例7: testRegionObserverScanTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Scan-time stacking test (older Result.list() API): NoDataFromScan at
 * highest priority strips all cells, so a Get returns an empty result.
 */
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] table = Bytes.toBytes(getClass().getName());
  final byte[] family = Bytes.toBytes("A");
  final byte[][] families = new byte[][] { family };
  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(table, getClass().getName(), conf, families);
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  Put put = new Put(row);
  put.add(family, family, family);
  region.put(put);
  Result result = region.get(new Get(row));
  assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
      + result, result.list());
}
示例8: testRegionObserverFlushTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Verifies flush-time coprocessor stacking (older API): NoDataFromFlush
 * (highest priority) drops every cell during flush, so after flushing the
 * put no data is readable via a Get.
 */
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flushcache();
  Get get = new Get(ROW);
  Result r = region.get(get);
  // Fixed copy-paste defect: the message previously named NoDataFromScan,
  // but this test loads NoDataFromFlush.
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.list());
}
示例9: reopenRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Reopens the closed HRegion and wires a coprocessor host by hand, loading
 * the given coprocessor classes and firing the open hooks explicitly.
 *
 * A region server would install the host via OpenRegionHandler; none runs
 * in this test, so it is created and attached manually.
 */
HRegion reopenRegion(final HRegion closedRegion, Class<?> ... implClasses)
throws IOException {
  HRegion reopened = HRegion.openHRegion(closedRegion, null);
  Configuration conf = TEST_UTIL.getConfiguration();
  RegionCoprocessorHost cpHost = new RegionCoprocessorHost(reopened, null, conf);
  reopened.setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load(implClass, Coprocessor.PRIORITY_USER, conf);
  }
  // load() is not the normal CP loading path (configuration or
  // HTableDescriptor), so preOpen/postOpen must be called manually.
  cpHost.preOpen();
  cpHost.postOpen();
  return reopened;
}
示例10: initHRegion
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Creates an HRegion for the given table and column families, attaches a
 * hand-built coprocessor host loaded with the supplied implementations,
 * asserts each coprocessor registered, and fires the open hooks manually.
 */
HRegion initHRegion (TableName tableName, String callingMethod,
Configuration conf, Class<?> [] implClasses, byte [][] families)
throws IOException {
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  for (byte[] family : families) {
    descriptor.addFamily(new HColumnDescriptor(family));
  }
  HRegion region = HRegion.createHRegion(
      new HRegionInfo(tableName, null, null, false),
      new Path(DIR + callingMethod), conf, descriptor);
  // Hack: OpenRegionHandler would normally install this host.
  RegionCoprocessorHost cpHost = new RegionCoprocessorHost(region, null, conf);
  region.setCoprocessorHost(cpHost);
  for (Class<?> implClass : implClasses) {
    cpHost.load(implClass, Coprocessor.PRIORITY_USER, conf);
    assertNotNull(cpHost.findCoprocessor(implClass.getName()));
  }
  // load() skips the normal open lifecycle, so invoke the hooks explicitly.
  cpHost.preOpen();
  cpHost.postOpen();
  return region;
}
示例11: testRegionObserverScanTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Scan-time stacking test: NoDataFromScan at highest priority strips all
 * cells, so a Get through the stacked observers yields no data.
 */
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] table = Bytes.toBytes(getClass().getName());
  final byte[] family = Bytes.toBytes("A");
  final byte[][] families = new byte[][] { family };
  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(table, getClass().getName(), conf, families);
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  Put put = new Put(row);
  put.add(family, family, family);
  region.put(put);
  Result result = region.get(new Get(row));
  assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
      + result, result.listCells());
}
示例12: testRegionObserverFlushTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Verifies flush-time coprocessor stacking: NoDataFromFlush (highest
 * priority) drops every cell during flush, so after flushing the put no
 * data is readable via a Get.
 */
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flushcache();
  Get get = new Get(ROW);
  Result r = region.get(get);
  // Fixed copy-paste defect: the message previously named NoDataFromScan,
  // but this test loads NoDataFromFlush.
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.listCells());
}
示例13: testRegionObserverScanTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Scan-time stacking test on an isolated HTU configuration: NoDataFromScan
 * at highest priority strips all cells, so the Get yields no data. The
 * region and its WAL are closed at the end.
 */
@Test
public void testRegionObserverScanTimeStacking() throws Exception {
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] table = Bytes.toBytes(getClass().getName());
  final byte[] family = Bytes.toBytes("A");
  final byte[][] families = new byte[][] { family };
  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(table, getClass().getName(), conf, families);
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(NoDataFromScan.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  Put put = new Put(row);
  put.addColumn(family, family, family);
  region.put(put);
  Result result = region.get(new Get(row));
  assertNull("Got an unexpected number of rows - no data should be returned with the NoDataFromScan coprocessor. Found: "
      + result, result.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
示例14: testRegionObserverFlushTimeStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Verifies flush-time coprocessor stacking on an isolated HTU configuration:
 * NoDataFromFlush (highest priority) drops every cell during flush, so after
 * flushing the put no data is readable. The region and its WAL are closed at
 * the end.
 */
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };
  // Use new HTU to not overlap with the DFS cluster started in #CompactionStacking
  Configuration conf = new HBaseTestingUtility().getConfiguration();
  HRegion region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);
  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.addColumn(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  // Fixed copy-paste defect: the message previously named NoDataFromScan,
  // but this test loads NoDataFromFlush.
  assertNull(
      "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
          + r, r.listCells());
  HBaseTestingUtility.closeRegionAndWAL(region);
}
示例15: testRegionObserverStacking
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost; //导入方法依赖的package包/类
/**
 * Loads three observers at HIGHEST, USER and LOWEST priority and checks
 * that a put invokes them in priority order by comparing the ids each
 * observer recorded when it ran.
 */
public void testRegionObserverStacking() throws Exception {
  final byte[] row = Bytes.toBytes("testRow");
  final byte[] table = Bytes.toBytes(this.getClass().getSimpleName());
  final byte[] family = Bytes.toBytes("A");
  final byte[][] families = new byte[][] { family };
  Configuration conf = HBaseConfiguration.create();
  HRegion region = initHRegion(table, getClass().getName(), conf, families);
  RegionCoprocessorHost cpHost = region.getCoprocessorHost();
  cpHost.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf);
  cpHost.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf);
  cpHost.load(ObserverC.class, Coprocessor.PRIORITY_LOWEST, conf);
  Put put = new Put(row);
  put.add(family, family, family);
  region.put(put);
  long idA = ((ObserverA) cpHost.findCoprocessor(ObserverA.class.getName())).id;
  long idB = ((ObserverB) cpHost.findCoprocessor(ObserverB.class.getName())).id;
  long idC = ((ObserverC) cpHost.findCoprocessor(ObserverC.class.getName())).id;
  // Higher-priority observers run first, so they record smaller ids.
  assertTrue(idA < idB);
  assertTrue(idB < idC);
}