當前位置: 首頁>>代碼示例>>Java>>正文


Java Admin.isTableEnabled方法代碼示例

本文整理匯總了Java中org.apache.hadoop.hbase.client.Admin.isTableEnabled方法的典型用法代碼示例。如果您正苦於以下問題:Java Admin.isTableEnabled方法的具體用法?Java Admin.isTableEnabled怎麽用?Java Admin.isTableEnabled使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.hbase.client.Admin的用法示例。


在下文中一共展示了Admin.isTableEnabled方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。

示例1: setUp

import org.apache.hadoop.hbase.client.Admin; //導入方法依賴的package包/類
/**
 * Brings up the mini cluster with the configured number of region servers and
 * guarantees the test table is absent before the test body runs.
 */
@Before
public void setUp() throws Exception {
  LOG.info(String.format("Initializing cluster with %d region servers.",
    REGION_SERVER_COUNT));
  util.initializeCluster(REGION_SERVER_COUNT);
  LOG.info("Cluster initialized");

  final Admin hbaseAdmin = util.getHBaseAdmin();
  final boolean leftoverTable = hbaseAdmin.tableExists(TABLE_NAME);
  if (leftoverTable) {
    LOG.info(String.format("Deleting existing table %s.", TABLE_NAME));
    // HBase refuses to drop an enabled table; disable it first.
    if (hbaseAdmin.isTableEnabled(TABLE_NAME)) {
      hbaseAdmin.disableTable(TABLE_NAME);
    }
    hbaseAdmin.deleteTable(TABLE_NAME);
    LOG.info(String.format("Existing table %s deleted.", TABLE_NAME));
  }
  LOG.info("Cluster ready");
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:17,代碼來源:IntegrationTestManyRegions.java

示例2: sniff

import org.apache.hadoop.hbase.client.Admin; //導入方法依賴的package包/類
/**
 * Canary entry point for specified table.
 * @throws Exception
 */
/**
 * Canary entry point for specified table.
 * Returns the sniff task futures for an enabled table; for a disabled table a
 * warning is logged and an empty (mutable) list is returned.
 * @throws Exception
 */
private static List<Future<Void>> sniff(final Admin admin, final Sink sink, String tableName,
    ExecutorService executor, TaskType taskType) throws Exception {
  final TableName table = TableName.valueOf(tableName);
  // Guard clause: nothing to sniff when the table is not enabled.
  if (!admin.isTableEnabled(table)) {
    LOG.warn(String.format("Table %s is not enabled", tableName));
    return new LinkedList<Future<Void>>();
  }
  return Canary.sniff(admin, sink, admin.getTableDescriptor(table), executor, taskType);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:15,代碼來源:Canary.java

示例3: setUp

import org.apache.hadoop.hbase.client.Admin; //導入方法依賴的package包/類
/**
 * Recreates the test table from a clean slate before each test, dropping any
 * leftover instance from a previous run.
 */
@Before
public void setUp() throws Exception {
  final Admin hbaseAdmin = util.getHBaseAdmin();
  final boolean tablePresent = hbaseAdmin.tableExists(tableName);
  if (tablePresent) {
    // A table must be disabled before HBase will delete it.
    if (hbaseAdmin.isTableEnabled(tableName)) {
      hbaseAdmin.disableTable(tableName);
    }
    hbaseAdmin.deleteTable(tableName);
  }
  util.createTable(tableName, new byte[][] {dummy, test});
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:12,代碼來源:TestRegionObserverBypass.java

示例4: tearDown

import org.apache.hadoop.hbase.client.Admin; //導入方法依賴的package包/類
/**
 * Removes the test table (if still present) and restores the cluster to its
 * pre-test state.
 */
@After
public void tearDown() throws IOException {
  LOG.info("Cleaning up after test.");
  final Admin hbaseAdmin = util.getHBaseAdmin();
  final boolean tableStillExists = hbaseAdmin.tableExists(TABLE_NAME);
  if (tableStillExists) {
    // Disable first: enabled tables cannot be deleted.
    if (hbaseAdmin.isTableEnabled(TABLE_NAME)) {
      hbaseAdmin.disableTable(TABLE_NAME);
    }
    hbaseAdmin.deleteTable(TABLE_NAME);
  }
  LOG.info("Restoring cluster.");
  util.restoreCluster();
  LOG.info("Cluster restored.");
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:13,代碼來源:IntegrationTestManyRegions.java

示例5: checkTable

import org.apache.hadoop.hbase.client.Admin; //導入方法依賴的package包/類
/**
 * Ensures the table named by {@code opts} exists with the requested layout,
 * recreating it when the user asked for presplit regions or when the existing
 * split policy / replica count no longer matches the request.
 *
 * @param admin cluster admin used to inspect, drop and create the table
 * @param opts  benchmark options naming the table and its desired layout
 * @return whether the table exists after this call
 * @throws IOException on any admin RPC failure
 * @throws IllegalStateException for read/scan commands against a missing table
 */
static boolean checkTable(Admin admin, TestOptions opts) throws IOException {
  TableName tableName = TableName.valueOf(opts.tableName);
  boolean needsDelete = false, exists = admin.tableExists(tableName);
  boolean isReadCmd = opts.cmdName.toLowerCase().contains("read")
    || opts.cmdName.toLowerCase().contains("scan");
  if (!exists && isReadCmd) {
    throw new IllegalStateException(
      "Must specify an existing table for read commands. Run a write command first.");
  }
  HTableDescriptor desc =
    exists ? admin.getTableDescriptor(TableName.valueOf(opts.tableName)) : null;
  byte[][] splits = getSplits(opts);

  // recreate the table when user has requested presplit or when existing
  // {RegionSplitPolicy,replica count} does not match requested.
  // FIX: split-policy class names are Strings, so compare by value with
  // Objects.equals (also null-safe) instead of '!=' reference identity, which
  // reported a mismatch for equal but non-interned names. Fully qualified
  // because 'Objects' below refers to Guava's toStringHelper class.
  if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions)
    || (!isReadCmd && desc != null
        && !java.util.Objects.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy))
    || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas)) {
    needsDelete = true;
    // wait, why did it delete my table?!?
    LOG.debug(Objects.toStringHelper("needsDelete")
      .add("needsDelete", needsDelete)
      .add("isReadCmd", isReadCmd)
      .add("exists", exists)
      .add("desc", desc)
      .add("presplit", opts.presplitRegions)
      .add("splitPolicy", opts.splitPolicy)
      .add("replicas", opts.replicas));
  }

  // remove an existing table (disable first; HBase cannot drop enabled tables)
  if (needsDelete) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }

  // table creation is necessary
  if (!exists || needsDelete) {
    desc = getTableDescriptor(opts);
    if (splits != null) {
      if (LOG.isDebugEnabled()) {
        for (int i = 0; i < splits.length; i++) {
          LOG.debug(" split " + i + ": " + Bytes.toStringBinary(splits[i]));
        }
      }
    }
    admin.createTable(desc, splits);
    LOG.info("Table " + desc + " created");
  }
  return admin.tableExists(tableName);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:54,代碼來源:PerformanceEvaluation.java

示例6: loadingClassFromLibDirInJar

import org.apache.hadoop.hbase.client.Admin; //導入方法依賴的package包/類
/**
 * Verifies that coprocessor classes packaged inside a nested lib directory of
 * an outer jar on HDFS are loaded onto every region of a newly created table.
 *
 * Steps: build two coprocessor jars, nest them in outer.jar under
 * {@code libPrefix}, copy the outer jar to HDFS, create a table whose
 * descriptor references both coprocessors by that jar path, then scan the
 * online regions of region server 0 and assert both coprocessor environments
 * (and the configuration keys of the second) are present.
 *
 * @param libPrefix directory prefix inside the outer jar under which the
 *                  inner coprocessor jars are placed
 * @throws Exception on any jar-building, HDFS, or admin failure
 */
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();

  // Build the two coprocessor jars and nest them inside outer.jar under libPrefix.
  File innerJarFile1 = buildCoprocessorJar(cpName1);
  File innerJarFile2 = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");

  ClassLoaderTestHelper.addJarFilesToJar(
    outerJarFile, libPrefix, innerJarFile1, innerJarFile2);

  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
    new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
    outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.",
    fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);

  // create a table that references the coprocessors
  // (descriptor value format: <jar path>|<class name>|<priority>[|<k=v,...>])
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("test"));
  // without configuration values
  htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
    "|" + Coprocessor.PRIORITY_USER);
  // with configuration values
  htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
    "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getHBaseAdmin();
  // Drop any leftover table from a previous run (disable before delete).
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  admin.createTable(htd);
  waitForTable(htd.getTableName());

  // verify that the coprocessors were loaded
  boolean found1 = false, found2 = false, found2_k1 = false,
      found2_k2 = false, found2_k3 = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  // Only regions of the test table on region server 0 are inspected here —
  // NOTE(review): assumes the table's regions are hosted there; confirm for
  // multi-RS configurations.
  for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (env != null) {
        found1 = true;
      }
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (env != null) {
        found2 = true;
        // The k1/k2/k3 values from COPROCESSOR$2 must surface in the
        // coprocessor environment's configuration.
        Configuration conf = env.getConfiguration();
        found2_k1 = conf.get("k1") != null;
        found2_k2 = conf.get("k2") != null;
        found2_k3 = conf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:66,代碼來源:TestClassLoading.java


注:本文中的org.apache.hadoop.hbase.client.Admin.isTableEnabled方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。