This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.addCoprocessor. If you are unsure what HTableDescriptor.addCoprocessor does, how to call it, or where to find real-world examples, the curated code samples below should help. You can also explore other usages of its containing class, org.apache.hadoop.hbase.HTableDescriptor.
Eight code examples of the HTableDescriptor.addCoprocessor method are shown below, sorted by popularity by default.
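Before the examples, here is a minimal, self-contained sketch of the typical pattern: build a table descriptor, attach the coprocessor by its fully-qualified class name via addCoprocessor, and create the table through Admin. The names used here (table "demo", family "cf", observer class org.example.MyRegionObserver) are placeholders for illustration only, not taken from the examples below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AddCoprocessorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // "demo" and "cf" are placeholder names for this sketch
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo"));
      desc.addFamily(new HColumnDescriptor("cf"));
      // Register the coprocessor by fully-qualified class name; the class
      // must be on the region server classpath when the table is opened.
      // org.example.MyRegionObserver is a hypothetical observer class.
      desc.addCoprocessor("org.example.MyRegionObserver");
      admin.createTable(desc);
    }
  }
}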
Example 1: runCoprocessorConnectionToRemoteTable
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
    boolean[] completeCheck) throws Throwable {
  HTableDescriptor primary = new HTableDescriptor(primaryTable);
  primary.addFamily(new HColumnDescriptor(family));
  // add our coprocessor
  primary.addCoprocessor(clazz.getName());

  HTableDescriptor other = new HTableDescriptor(otherTable);
  other.addFamily(new HColumnDescriptor(family));

  Admin admin = UTIL.getHBaseAdmin();
  admin.createTable(primary);
  admin.createTable(other);

  Table table = new HTable(UTIL.getConfiguration(), TableName.valueOf("primary"));
  Put p = new Put(new byte[] { 'a' });
  p.add(family, null, new byte[] { 'a' });
  table.put(p);
  table.close();

  Table target = new HTable(UTIL.getConfiguration(), otherTable);
  assertTrue("Didn't complete update to target table!", completeCheck[0]);
  assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
  target.close();
}
Example 2: testCreateDeleteTable
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=30000)
public void testCreateDeleteTable() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][]{f}, HTU.getConfiguration());

  Put p = new Put(row);
  p.add(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    // But if we ask for stale we will get it
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
    SlowMeCopro.cdl.get().countDown();
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
}
Example 3: beforeClass
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@BeforeClass
public static void beforeClass() throws Exception {
  // enable store file refreshing
  HTU.getConfiguration().setInt(
      StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD);
  HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
  ConnectionUtils.setupMasterlessConnection(HTU.getConfiguration());
  HTU.startMiniCluster(NB_SERVERS);

  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(TestReplicasClient.class.getSimpleName());
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  table = HTU.createTable(hdt, new byte[][]{f}, HTU.getConfiguration());

  hriPrimary = table.getRegionLocation(row, false).getRegionInfo();

  // mock a secondary region info to open
  hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),
      hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);

  // No master
  LOG.info("Master is going to be stopped");
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
  Configuration c = new Configuration(HTU.getConfiguration());
  c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  LOG.info("Master has stopped");
}
Example 4: testCoprocessorTableEndpoint
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testCoprocessorTableEndpoint() throws Throwable {
  final TableName tableName = TableName.valueOf("testCoprocessorTableEndpoint");

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName());

  createTable(desc);
  verifyTable(tableName);
}
Example 5: testDynamicCoprocessorTableEndpoint
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testDynamicCoprocessorTableEndpoint() throws Throwable {
  final TableName tableName = TableName.valueOf("testDynamicCoprocessorTableEndpoint");

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  createTable(desc);

  desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName());
  updateTable(desc);

  verifyTable(tableName);
}
Example 6: installSlowingCoproc
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Modify table {@code getTableName()} to carry {@link SlowMeCoproScanOperations}.
 */
private void installSlowingCoproc() throws IOException, InterruptedException {
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;

  TableName t = getTablename();
  Admin admin = util.getHBaseAdmin();
  HTableDescriptor desc = admin.getTableDescriptor(t);
  desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
  HBaseTestingUtility.modifyTableSync(admin, desc);
  // Sleep for some time; the hope is that the regions are closed/opened
  // before the sleep returns. TODO: do this better.
  Thread.sleep(30000);
}
Example 7: testChangeTable
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=120000)
public void testChangeTable() throws Exception {
  HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][]{f}, HTU.getConfiguration());

  // basic test: it should work.
  Put p = new Put(row);
  p.add(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  // Add a CF, it should work.
  HTableDescriptor bHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  HColumnDescriptor hcd = new HColumnDescriptor(row);
  hdt.addFamily(hcd);
  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.getHBaseAdmin().modifyTable(hdt.getTableName(), hdt);
  HTU.getHBaseAdmin().enableTable(hdt.getTableName());
  HTableDescriptor nHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1, nHdt.getColumnFamilies().length);

  p = new Put(row);
  p.add(row, row, row);
  table.put(p);

  g = new Get(row);
  r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getHBaseCluster().stopMaster(0);
  Admin admin = new HBaseAdmin(HTU.getConfiguration());
  nHdt = admin.getTableDescriptor(hdt.getTableName());
  Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1, nHdt.getColumnFamilies().length);

  admin.disableTable(hdt.getTableName());
  admin.deleteTable(hdt.getTableName());
  HTU.getHBaseCluster().startMaster();
  admin.close();
}
Example 8: enable
import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Enable constraints on a table.
 * <p>
 * Currently, if you attempt to add a constraint to the table, then
 * Constraints will automatically be turned on.
 *
 * @param desc
 *          table description to add the processor
 * @throws IOException
 *           If the {@link ConstraintProcessor} CP couldn't be added to the
 *           table.
 */
public static void enable(HTableDescriptor desc) throws IOException {
  // if the CP has already been loaded, do nothing
  String clazz = ConstraintProcessor.class.getName();
  if (desc.hasCoprocessor(clazz)) {
    return;
  }
  // add the constraint processor CP to the table
  desc.addCoprocessor(clazz);
}
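A quick usage note on Example 8: the hasCoprocessor check makes enable idempotent, so calling it more than once on the same descriptor is harmless. Assuming it is invoked through HBase's Constraints utility class (org.apache.hadoop.hbase.constraint.Constraints), a hedged sketch with placeholder names:

// "users" and "info" are placeholder names for this sketch;
// 'admin' is assumed to be an existing Admin instance.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("users"));
desc.addFamily(new HColumnDescriptor("info"));
Constraints.enable(desc);  // attaches ConstraintProcessor exactly once
Constraints.enable(desc);  // no-op: hasCoprocessor() short-circuits
admin.createTable(desc);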