

Java CoprocessorHost Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.coprocessor.CoprocessorHost. If you are wondering what the CoprocessorHost class is for, how to use it, or what working examples look like, the curated code samples below should help.


The CoprocessorHost class belongs to the org.apache.hadoop.hbase.coprocessor package. The sections below show 15 code examples of this class, sorted by popularity by default.
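Before the examples: most of them use CoprocessorHost only through its public configuration-key constants (REGION_COPROCESSOR_CONF_KEY, USER_REGION_COPROCESSOR_CONF_KEY, MASTER_COPROCESSOR_CONF_KEY, REGIONSERVER_COPROCESSOR_CONF_KEY, WAL_COPROCESSOR_CONF_KEY, ABORT_ON_ERROR_KEY) to register coprocessor classes on an HBase Configuration, or through the static getLoadedCoprocessors() accessor. Here is a minimal sketch of that pattern; the observer class com.example.MyRegionObserver is a hypothetical placeholder and not part of the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class CoprocessorConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Register a region coprocessor by fully qualified class name.
    // com.example.MyRegionObserver is a hypothetical observer implementation.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
        "com.example.MyRegionObserver");

    // Do not abort the region server if a coprocessor throws an error.
    conf.setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);

    // Inside a running server process, the class names of all loaded
    // coprocessors can be inspected via this static accessor.
    System.out.println(CoprocessorHost.getLoadedCoprocessors());
  }
}

Which key you set (region, user region, master, region server, or WAL) determines which CoprocessorHost subclass loads the class, which is exactly what the test setups below rely on.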

Example 1: postOpen

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
/****************************** Region related hooks ******************************/

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
    // Read the entire labels table and populate the zk
    if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) {
      this.labelsRegion = true;
      synchronized (this) {
        this.accessControllerAvailable = CoprocessorHost.getLoadedCoprocessors()
          .contains(AccessController.class.getName());
      }
      // Defer the init of VisibilityLabelService on labels region until it is in recovering state.
      if (!e.getEnvironment().getRegion().isRecovering()) {
        initVisibilityLabelService(e.getEnvironment());
      }
    } else {
      checkAuths = e.getEnvironment().getConfiguration()
          .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false);
      initVisibilityLabelService(e.getEnvironment());
    }
  }
 
Developer: fengchen8086, Project: ditb, Lines: 22, Source: VisibilityController.java

Example 2: setUp

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setUp() throws Exception {
  Properties conf = MiniKdc.createConf();
  conf.put(MiniKdc.DEBUG, true);
  KDC = new MiniKdc(conf, new File(TEST_UTIL.getDataTestDir("kdc").toUri().getPath()));
  KDC.start();
  USERNAME = UserGroupInformation.getLoginUser().getShortUserName();
  PRINCIPAL = USERNAME + "/" + HOST;
  HTTP_PRINCIPAL = "HTTP/" + HOST;
  KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL, HTTP_PRINCIPAL);
  TEST_UTIL.startMiniZKCluster();

  HBaseKerberosUtils.setKeytabFileForTesting(KEYTAB_FILE.getAbsolutePath());
  HBaseKerberosUtils.setPrincipalForTesting(PRINCIPAL + "@" + KDC.getRealm());
  HBaseKerberosUtils.setSecuredConfiguration(TEST_UTIL.getConfiguration());
  setHdfsSecuredConfiguration(TEST_UTIL.getConfiguration());
  UserGroupInformation.setConfiguration(TEST_UTIL.getConfiguration());
  TEST_UTIL.getConfiguration().setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    TokenProvider.class.getName());
  TEST_UTIL.startMiniDFSCluster(1);
  Path rootdir = TEST_UTIL.getDataTestDirOnTestFS("TestGenerateDelegationToken");
  FSUtils.setRootDir(TEST_UTIL.getConfiguration(), rootdir);
  CLUSTER = new LocalHBaseCluster(TEST_UTIL.getConfiguration(), 1);
  CLUSTER.startup();
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: TestGenerateDelegationToken.java

Example 3: setUpBeforeClass

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  Admin admin = util.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  util.createTable(tableName, new byte[][]{dummy, test});

  Table ht = new HTable(conf, tableName);
  Put p = new Put(row1);
  p.add(dummy, dummy, dummy);
  ht.put(p);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: TestClientOperationInterrupt.java

Example 4: setupOnce

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
static void setupOnce() throws Exception {
  // Use our load balancer to control region plans
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    MyLoadBalancer.class, LoadBalancer.class);
  conf.setClass(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    MyRegionObserver.class, RegionObserver.class);
  // Reduce the maximum attempts to speed up the test
  conf.setInt("hbase.assignment.maximum.attempts", 3);
  // Put meta on master to avoid meta server shutdown handling
  conf.set("hbase.balancer.tablesOnMaster", "hbase:meta");
  conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
  conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);

  TEST_UTIL.startMiniCluster(1, 4, null, MyMaster.class, MyRegionServer.class);
  admin = TEST_UTIL.getHBaseAdmin();
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: TestAssignmentManagerOnCluster.java

Example 5: setUpBeforeClass

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Make block sizes small.
  TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
  // quicker heartbeat interval for faster DN death notification
  TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
  TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
  TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);

  // faster failover with cluster.shutdown();fs.close() idiom
  TEST_UTIL.getConfiguration()
      .setInt("hbase.ipc.client.connect.max.retries", 1);
  TEST_UTIL.getConfiguration().setInt(
      "dfs.client.block.recovery.retries", 1);
  TEST_UTIL.getConfiguration().setInt(
    "hbase.ipc.client.connection.maxidletime", 500);
  TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName());
  TEST_UTIL.startMiniDFSCluster(3);

  conf = TEST_UTIL.getConfiguration();
  fs = TEST_UTIL.getDFSCluster().getFileSystem();
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestFSHLog.java

Example 6: setUpBeforeClass

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Make block sizes small.
  TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
  // needed for testAppendClose()
  TEST_UTIL.getConfiguration().setBoolean("dfs.support.broken.append", true);
  TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
  // quicker heartbeat interval for faster DN death notification
  TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
  TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
  TEST_UTIL.getConfiguration().setInt("dfs.client.socket-timeout", 5000);

  // faster failover with cluster.shutdown();fs.close() idiom
  TEST_UTIL.getConfiguration()
      .setInt("hbase.ipc.client.connect.max.retries", 1);
  TEST_UTIL.getConfiguration().setInt(
      "dfs.client.block.recovery.retries", 1);
  TEST_UTIL.getConfiguration().setInt(
    "hbase.ipc.client.connection.maxidletime", 500);
  TEST_UTIL.getConfiguration().set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY,
      SampleRegionWALObserver.class.getName());
  TEST_UTIL.startMiniDFSCluster(3);
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestProtobufLog.java

Example 7: setUp

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@Before
public void setUp() throws Exception {
  baseConfiguration = HBaseConfiguration.create();
  // smaller block size and capacity to trigger more operations
  // and test them
  baseConfiguration.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
  baseConfiguration.setInt("replication.source.size.capacity", 1024);
  baseConfiguration.setLong("replication.source.sleepforretries", 100);
  baseConfiguration.setInt("hbase.regionserver.maxlogs", 10);
  baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10);
  baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  baseConfiguration.setBoolean("dfs.support.append", true);
  baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  baseConfiguration.setStrings(
      CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      CoprocessorCounter.class.getName());

  table = new HTableDescriptor(tableName);
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  fam = new HColumnDescriptor(noRepfamName);
  table.addFamily(fam);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 25, Source: TestMasterReplication.java

Example 8: setupBeforeClass

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setupBeforeClass() throws Exception {
  util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      PingHandler.class.getName());
  util.startMiniCluster(1);
  cluster = util.getMiniHBaseCluster();

  HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
  util.createMultiRegions(util.getConfiguration(), table, TEST_FAMILY,
      new byte[][]{ HConstants.EMPTY_BYTE_ARRAY,
          ROW_B, ROW_C});

  Put puta = new Put( ROW_A );
  puta.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(puta);

  Put putb = new Put( ROW_B );
  putb.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putb);

  Put putc = new Put( ROW_C );
  putc.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));
  table.put(putc);
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 25, Source: TestServerCustomProtocol.java

Example 9: postOpen

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
/****************************** Region related hooks ******************************/

  @Override
  public void postOpen(ObserverContext<RegionCoprocessorEnvironment> e) {
    // Read the entire labels table and populate the zk
    if (e.getEnvironment().getRegion().getRegionInfo().getTable().equals(LABELS_TABLE_NAME)) {
      this.labelsRegion = true;
      this.acOn = CoprocessorHost.getLoadedCoprocessors().contains(AccessController.class.getName());
      // Defer the init of VisibilityLabelService on labels region until it is in recovering state.
      if (!e.getEnvironment().getRegion().isRecovering()) {
        initVisibilityLabelService(e.getEnvironment());
      }
    } else {
      checkAuths = e.getEnvironment().getConfiguration()
          .getBoolean(VisibilityConstants.CHECK_AUTHS_FOR_MUTATION, false);
      initVisibilityLabelService(e.getEnvironment());
    }
  }
 
Developer: grokcoder, Project: pbase, Lines: 19, Source: VisibilityController.java

Example 10: setUp

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@Before
public void setUp() throws Exception {
  baseConfiguration = HBaseConfiguration.create();
  // smaller block size and capacity to trigger more operations
  // and test them
  baseConfiguration.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
  baseConfiguration.setInt("replication.source.size.capacity", 1024);
  baseConfiguration.setLong("replication.source.sleepforretries", 100);
  baseConfiguration.setInt("hbase.regionserver.maxlogs", 10);
  baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10);
  baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY,
      HConstants.REPLICATION_ENABLE_DEFAULT);
  baseConfiguration.setBoolean("dfs.support.append", true);
  baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  baseConfiguration.setStrings(
      CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      CoprocessorCounter.class.getName());

  table = new HTableDescriptor(tableName);
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  fam = new HColumnDescriptor(noRepfamName);
  table.addFamily(fam);
}
 
Developer: grokcoder, Project: pbase, Lines: 26, Source: TestMasterReplication.java

Example 11: enableSecurity

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
public static void enableSecurity(Configuration conf) throws IOException {
  conf.set("hadoop.security.authorization", "false");
  conf.set("hadoop.security.authentication", "simple");
  conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() +
    "," + SecureBulkLoadEndpoint.class.getName());
  conf.set(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
  // The secure minicluster creates separate service principals based on the
  // current user's name, one for each slave. We need to add all of these to
  // the superuser list or security won't function properly. We expect the
  // HBase service account(s) to have superuser privilege.
  String currentUser = User.getCurrent().getName();
  StringBuffer sb = new StringBuffer();
  sb.append("admin,");
  sb.append(currentUser);
  // Assumes we won't ever have a minicluster with more than 5 slaves
  for (int i = 0; i < 5; i++) {
    sb.append(',');
    sb.append(currentUser); sb.append(".hfs."); sb.append(i);
  }
  conf.set("hbase.superuser", sb.toString());
  // Need HFile V3 for tags for security features
  conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
}
 
Developer: grokcoder, Project: pbase, Lines: 25, Source: SecureTestUtil.java

Example 12: setUpBeforeClass

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start mini cluster
  TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
  TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
  // Security setup configuration
  SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
  VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration());

  TEST_UTIL.startMiniCluster();

  // Configure jruby runtime
  List<String> loadPaths = new ArrayList<>();
  loadPaths.add("src/main/ruby");
  loadPaths.add("src/test/ruby");
  jruby.getProvider().setLoadPaths(loadPaths);
  jruby.put("$TEST_CLUSTER", TEST_UTIL);
  System.setProperty("jruby.jit.logging.verbose", "true");
  System.setProperty("jruby.jit.logging", "true");
  System.setProperty("jruby.native.verbose", "true");
}
 
Developer: grokcoder, Project: pbase, Lines: 26, Source: TestShell.java

Example 13: setUp

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@Before
public void setUp() throws Exception {
  baseConfiguration = HBaseConfiguration.create();
  // smaller block size and capacity to trigger more operations
  // and test them
  baseConfiguration.setInt("hbase.regionserver.hlog.blocksize", 1024 * 20);
  baseConfiguration.setInt("replication.source.size.capacity", 1024);
  baseConfiguration.setLong("replication.source.sleepforretries", 100);
  baseConfiguration.setInt("hbase.regionserver.maxlogs", 10);
  baseConfiguration.setLong("hbase.master.logcleaner.ttl", 10);
  baseConfiguration.setBoolean(HConstants.REPLICATION_ENABLE_KEY,
      HConstants.REPLICATION_ENABLE_DEFAULT);
  baseConfiguration.setBoolean("dfs.support.append", true);
  baseConfiguration.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  baseConfiguration.setStrings(
      CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      CoprocessorCounter.class.getName());

  table = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor fam = new HColumnDescriptor(famName);
  fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
  table.addFamily(fam);
  fam = new HColumnDescriptor(noRepfamName);
  table.addFamily(fam);
}
 
Developer: tenggyut, Project: HIndex, Lines: 26, Source: TestMasterReplication.java

Example 14: setUpBeforeClass

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  HBaseAdmin admin = util.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  util.createTable(tableName, new byte[][]{dummy, test});

  HTable ht = new HTable(conf, tableName);
  Put p = new Put(row1);
  p.add(dummy, dummy, dummy);
  ht.put(p);
}
 
Developer: tenggyut, Project: HIndex, Lines: 23, Source: TestClientOperationInterrupt.java

Example 15: setUpBeforeClass

import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; // import the required package/class
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Start mini cluster
  TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
  TEST_UTIL.startMiniCluster();

  // Configure jruby runtime
  List<String> loadPaths = new ArrayList<>();
  loadPaths.add("src/main/ruby");
  loadPaths.add("src/test/ruby");
  jruby.getProvider().setLoadPaths(loadPaths);
  jruby.put("$TEST_CLUSTER", TEST_UTIL);
}
 
Developer: tenggyut, Project: HIndex, Lines: 18, Source: TestShell.java


Note: The org.apache.hadoop.hbase.coprocessor.CoprocessorHost class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; for distribution and use, please refer to the license of the corresponding project. Do not republish without permission.