

Java HTableDescriptor.addCoprocessor Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HTableDescriptor.addCoprocessor. If you are wondering what HTableDescriptor.addCoprocessor does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HTableDescriptor.


The sections below present 8 code examples of HTableDescriptor.addCoprocessor, ordered by popularity by default.
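Before the collected examples, here is a minimal standalone sketch of the basic call pattern: register the coprocessor on the HTableDescriptor before handing the descriptor to Admin.createTable. The table name, column family, and coprocessor class name (com.example.MyRegionObserver) are placeholders for illustration, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AddCoprocessorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Build the descriptor: table name plus at least one column family.
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("demo_table"));
      desc.addFamily(new HColumnDescriptor("cf"));

      // Register the coprocessor by fully qualified class name. The class must be
      // on the region servers' classpath (or supplied via the jar-path overload).
      desc.addCoprocessor("com.example.MyRegionObserver");

      admin.createTable(desc);
    }
  }
}

HTableDescriptor also offers an overload, addCoprocessor(className, jarFilePath, priority, kvs), for loading the coprocessor class from a jar path with an explicit priority and per-coprocessor configuration; the single-argument form used throughout this article relies on the class already being on the server classpath.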

Example 1: runCoprocessorConnectionToRemoteTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
    boolean[] completeCheck) throws Throwable {
  HTableDescriptor primary = new HTableDescriptor(primaryTable);
  primary.addFamily(new HColumnDescriptor(family));
  // add our coprocessor
  primary.addCoprocessor(clazz.getName());

  HTableDescriptor other = new HTableDescriptor(otherTable);
  other.addFamily(new HColumnDescriptor(family));


  Admin admin = UTIL.getHBaseAdmin();
  admin.createTable(primary);
  admin.createTable(other);

  Table table = new HTable(UTIL.getConfiguration(), TableName.valueOf("primary"));
  Put p = new Put(new byte[] { 'a' });
  p.add(family, null, new byte[] { 'a' });
  table.put(p);
  table.close();

  Table target = new HTable(UTIL.getConfiguration(), otherTable);
  assertTrue("Didn't complete update to target table!", completeCheck[0]);
  assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
  target.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: TestOpenTableInCoprocessor.java
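Example 1 only exercises the table setup; the observer passed in as clazz is defined elsewhere in TestOpenTableInCoprocessor. A simplified, hypothetical observer in the same spirit could forward every put to the second table through the coprocessor environment. The class name and the hard-coded "otherTable" name below are illustrative assumptions, not the test's actual implementation.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

public class CopyToOtherTableObserver extends BaseRegionObserver {
  @Override
  public void postPut(ObserverContext<RegionCoprocessorEnvironment> e, Put put,
      WALEdit edit, Durability durability) throws IOException {
    // Open the second table through the coprocessor environment so the
    // connection is managed by the region server rather than a client.
    try (Table other = e.getEnvironment().getTable(TableName.valueOf("otherTable"))) {
      other.put(put);
    }
  }
}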

Example 2: testCreateDeleteTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=30000)
public void testCreateDeleteTable() throws IOException {
  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor("testCreateDeleteTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][]{f}, HTU.getConfiguration());

  Put p = new Put(row);
  p.add(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    // But if we ask for stale we will get it
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
    SlowMeCopro.cdl.get().countDown();
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.deleteTable(hdt.getTableName());
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: TestReplicaWithCluster.java

Example 3: beforeClass

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@BeforeClass
public static void beforeClass() throws Exception {
  // enable store file refreshing
  HTU.getConfiguration().setInt(
      StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, REFRESH_PERIOD);
  HTU.getConfiguration().setBoolean("hbase.client.log.scanner.activity", true);
  ConnectionUtils.setupMasterlessConnection(HTU.getConfiguration());
  HTU.startMiniCluster(NB_SERVERS);

  // Create table then get the single region for our new table.
  HTableDescriptor hdt = HTU.createTableDescriptor(TestReplicasClient.class.getSimpleName());
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  table = HTU.createTable(hdt, new byte[][]{f}, HTU.getConfiguration());

  hriPrimary = table.getRegionLocation(row, false).getRegionInfo();

  // mock a secondary region info to open
  hriSecondary = new HRegionInfo(hriPrimary.getTable(), hriPrimary.getStartKey(),
      hriPrimary.getEndKey(), hriPrimary.isSplit(), hriPrimary.getRegionId(), 1);

  // No master
  LOG.info("Master is going to be stopped");
  TestRegionServerNoMaster.stopMasterAndAssignMeta(HTU);
  Configuration c = new Configuration(HTU.getConfiguration());
  c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  LOG.info("Master has stopped");
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: TestReplicasClient.java

Example 4: testCoprocessorTableEndpoint

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testCoprocessorTableEndpoint() throws Throwable {    
  final TableName tableName = TableName.valueOf("testCoprocessorTableEndpoint");

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName());

  createTable(desc);
  verifyTable(tableName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: TestCoprocessorTableEndpoint.java

Example 5: testDynamicCoprocessorTableEndpoint

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test
public void testDynamicCoprocessorTableEndpoint() throws Throwable {    
  final TableName tableName = TableName.valueOf("testDynamicCoprocessorTableEndpoint");

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));

  createTable(desc);

  desc.addCoprocessor(org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName());
  updateTable(desc);

  verifyTable(tableName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestCoprocessorTableEndpoint.java

Example 6: installSlowingCoproc

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Modify table {@code getTableName()} to carry {@link SlowMeCoproScanOperations}.
 */
private void installSlowingCoproc() throws IOException, InterruptedException {
  int replicaCount = conf.getInt(NUM_REPLICA_COUNT_KEY, NUM_REPLICA_COUNT_DEFAULT);
  if (replicaCount == NUM_REPLICA_COUNT_DEFAULT) return;

  TableName t = getTablename();
  Admin admin = util.getHBaseAdmin();
  HTableDescriptor desc = admin.getTableDescriptor(t);
  desc.addCoprocessor(SlowMeCoproScanOperations.class.getName());
  HBaseTestingUtility.modifyTableSync(admin, desc);
  //sleep for sometime. Hope is that the regions are closed/opened before 
  //the sleep returns. TODO: do this better
  Thread.sleep(30000);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: IntegrationTestBulkLoad.java

Example 7: testChangeTable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
@Test (timeout=120000)
public void testChangeTable() throws Exception {
  HTableDescriptor hdt = HTU.createTableDescriptor("testChangeTable");
  hdt.setRegionReplication(NB_SERVERS);
  hdt.addCoprocessor(SlowMeCopro.class.getName());
  Table table = HTU.createTable(hdt, new byte[][]{f}, HTU.getConfiguration());

  // basic test: it should work.
  Put p = new Put(row);
  p.add(f, row, row);
  table.put(p);

  Get g = new Get(row);
  Result r = table.get(g);
  Assert.assertFalse(r.isStale());

  // Add a CF, it should work.
  HTableDescriptor bHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  HColumnDescriptor hcd = new HColumnDescriptor(row);
  hdt.addFamily(hcd);
  HTU.getHBaseAdmin().disableTable(hdt.getTableName());
  HTU.getHBaseAdmin().modifyTable(hdt.getTableName(), hdt);
  HTU.getHBaseAdmin().enableTable(hdt.getTableName());
  HTableDescriptor nHdt = HTU.getHBaseAdmin().getTableDescriptor(hdt.getTableName());
  Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1, nHdt.getColumnFamilies().length);

  p = new Put(row);
  p.add(row, row, row);
  table.put(p);

  g = new Get(row);
  r = table.get(g);
  Assert.assertFalse(r.isStale());

  try {
    SlowMeCopro.cdl.set(new CountDownLatch(1));
    g = new Get(row);
    g.setConsistency(Consistency.TIMELINE);
    r = table.get(g);
    Assert.assertTrue(r.isStale());
  } finally {
    SlowMeCopro.cdl.get().countDown();
    SlowMeCopro.sleepTime.set(0);
  }

  HTU.getHBaseCluster().stopMaster(0);
  Admin admin = new HBaseAdmin(HTU.getConfiguration());
  nHdt = admin.getTableDescriptor(hdt.getTableName());
  Assert.assertEquals("fams=" + Arrays.toString(nHdt.getColumnFamilies()),
      bHdt.getColumnFamilies().length + 1, nHdt.getColumnFamilies().length);

  admin.disableTable(hdt.getTableName());
  admin.deleteTable(hdt.getTableName());
  HTU.getHBaseCluster().startMaster();
  admin.close();
}
 
Developer: fengchen8086, Project: ditb, Lines: 58, Source: TestReplicaWithCluster.java

Example 8: enable

import org.apache.hadoop.hbase.HTableDescriptor; // import the package/class this method depends on
/**
 * Enable constraints on a table.
 * <p>
 * Currently, if you attempt to add a constraint to the table, then
 * Constraints will automatically be turned on.
 * 
 * @param desc
 *          table description to add the processor
 * @throws IOException
 *           If the {@link ConstraintProcessor} CP couldn't be added to the
 *           table.
 */
public static void enable(HTableDescriptor desc) throws IOException {
  // if the CP has already been loaded, do nothing
  String clazz = ConstraintProcessor.class.getName();
  if (desc.hasCoprocessor(clazz)) {
    return;
  }

  // add the constraint processor CP to the table
  desc.addCoprocessor(clazz);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: Constraints.java
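For context, here is a hedged usage sketch of Constraints.enable (the table and family names are illustrative, not from the source): call it on the descriptor before the table is created or modified, and repeated calls are harmless because the hasCoprocessor check short-circuits.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.constraint.Constraints;

public class EnableConstraintsSketch {
  public static HTableDescriptor buildDescriptor() throws java.io.IOException {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("constrained_table"));
    desc.addFamily(new HColumnDescriptor("cf"));

    // Idempotent: the second call is a no-op because the descriptor
    // already carries the ConstraintProcessor coprocessor.
    Constraints.enable(desc);
    Constraints.enable(desc);

    // The descriptor can now be passed to Admin.createTable(desc)
    // or used in a table modification.
    return desc;
  }
}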


Note: The org.apache.hadoop.hbase.HTableDescriptor.addCoprocessor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.