

Java Admin.close Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.hbase.client.Admin.close. If you are wondering how exactly Admin.close is called in Java, how it is used, or what real-world examples look like, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Admin.


The sections below present 15 code examples of the Admin.close method, sorted by popularity by default.
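The examples in this article close the Admin explicitly in a finally block. Because Admin and Connection both implement java.io.Closeable, the same cleanup can also be written with try-with-resources, which calls close() automatically. The following is a minimal sketch of that pattern; the configuration and the table name "example_table" are placeholders chosen for illustration, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources closes the Admin first, then the Connection,
    // equivalent to calling admin.close() and connection.close() in a finally block
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // "example_table" is a placeholder table name used only for illustration
      boolean exists = admin.tableExists(TableName.valueOf("example_table"));
      System.out.println("Table exists: " + exists);
    }
  }
}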

Example 1: doWork

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
@Override
protected int doWork() throws Exception {
    Connection connection = null;
    Admin admin = null;
    try {
        connection = ConnectionFactory.createConnection(getConf());
        admin = connection.getAdmin();
        HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
        if (snapshotType != null) {
            // map the requested snapshot type string (e.g. "flush", "skipflush") to the protobuf enum
            type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotType.toUpperCase());
        }

        admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
    } catch (Exception e) {
        return -1;
    } finally {
        if (admin != null) {
            admin.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
    return 0;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source: CreateSnapshot.java

Example 2: testTableNameEnumeration

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
@Test (timeout=180000)
public void testTableNameEnumeration() throws Exception {
  AccessTestAction listTablesAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Connection unmanagedConnection =
          ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Admin admin = unmanagedConnection.getAdmin();
      try {
        return Arrays.asList(admin.listTableNames());
      } finally {
        admin.close();
        unmanagedConnection.close();
      }
    }
  };

  verifyAllowed(listTablesAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_RW,
    USER_RO, USER_GROUP_CREATE, USER_GROUP_ADMIN, USER_GROUP_READ, USER_GROUP_WRITE);
  verifyIfEmptyList(listTablesAction, USER_NONE);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 22, Source: TestAccessController.java

Example 3: testSanity

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
/**
 * Make sure we can use the cluster
 * @throws Exception
 */
private void testSanity(final String testName) throws Exception{
  String tableName = testName + "_" + System.currentTimeMillis();
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  HColumnDescriptor family = new HColumnDescriptor("fam");
  desc.addFamily(family);
  LOG.info("Creating table " + tableName);
  Admin admin = TEST_UTIL.getHBaseAdmin();
  try {
    admin.createTable(desc);
  } finally {
    admin.close();
  }

  Table table =
    new HTable(new Configuration(TEST_UTIL.getConfiguration()), desc.getTableName());
  Put put = new Put(Bytes.toBytes("testrow"));
  put.add(Bytes.toBytes("fam"),
      Bytes.toBytes("col"), Bytes.toBytes("testdata"));
  LOG.info("Putting table " + tableName);
  table.put(put);
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 27, Source: TestZooKeeper.java

Example 4: prepareForLoadTest

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
protected void prepareForLoadTest() throws IOException {
  LOG.info("Starting load test: dataBlockEncoding=" + dataBlockEncoding +
      ", isMultiPut=" + isMultiPut);
  numKeys = numKeys();
  Admin admin = new HBaseAdmin(conf);
  while (admin.getClusterStatus().getServers().size() < NUM_RS) {
    LOG.info("Sleeping until " + NUM_RS + " RSs are online");
    Threads.sleepWithoutInterrupt(1000);
  }
  admin.close();

  HTableDescriptor htd = new HTableDescriptor(TABLE);
  HColumnDescriptor hcd = new HColumnDescriptor(CF)
    .setCompressionType(compression)
    .setDataBlockEncoding(dataBlockEncoding);
  createPreSplitLoadTestTable(htd, hcd);

  LoadTestDataGenerator dataGen = new MultiThreadedAction.DefaultDataGenerator(CF);
  writerThreads = prepareWriterThreads(dataGen, conf, TABLE);
  readerThreads = prepareReaderThreads(dataGen, conf, TABLE, 100);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 22, Source: TestMiniClusterLoadSequential.java

Example 5: runTestFromCommandLine

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
@Override
public int runTestFromCommandLine() throws Exception {
  IntegrationTestingUtility.setUseDistributedCluster(getConf());
  int numPresplits = getConf().getInt("loadmapper.numPresplits", 5);
  // create HTableDescriptor for specified table
  HTableDescriptor htd = new HTableDescriptor(getTablename());
  htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

  Admin admin = new HBaseAdmin(getConf());
  try {
    admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
  } finally {
    admin.close();
  }
  doLoad(getConf(), htd);
  doVerify(getConf(), htd);
  getTestingUtil(getConf()).deleteTable(htd.getName());
  return 0;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: IntegrationTestWithCellVisibilityLoadAndVerify.java

Example 6: beforeClass

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
@BeforeClass
public static void beforeClass() throws Exception {
  SUPERUSER = User.createUserForTesting(conf, "admin",
      new String[] { "supergroup" });
  conf = UTIL.getConfiguration();
  conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS,
      SimpleScanLabelGenerator.class, ScanLabelGenerator.class);
  conf.set("hbase.superuser", SUPERUSER.getShortName());
  conf.set("hbase.coprocessor.master.classes",
      VisibilityController.class.getName());
  conf.set("hbase.coprocessor.region.classes",
      VisibilityController.class.getName());
  conf.setInt("hfile.format.version", 3);
  UTIL.startMiniCluster(1);
  // Wait for the labels table to become available
  UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000);
  createLabels();
  Admin admin = new HBaseAdmin(UTIL.getConfiguration());
  HTableDescriptor tableDescriptor = new HTableDescriptor(
      TableName.valueOf(tableAname));
  for (HColumnDescriptor family : families) {
    tableDescriptor.addFamily(family);
  }
  admin.createTable(tableDescriptor);
  admin.close();
  setAuths();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 28, Source: TestThriftHBaseServiceHandlerWithLabels.java

Example 7: beforeClass

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
@BeforeClass
public static void beforeClass() throws Exception {
  UTIL.startMiniCluster();
  Admin admin = new HBaseAdmin(UTIL.getConfiguration());
  HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableAname));
  for (HColumnDescriptor family : families) {
    tableDescriptor.addFamily(family);
  }
  admin.createTable(tableDescriptor);
  admin.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 12, Source: TestThriftHBaseServiceHandler.java

Example 8: main

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
/**
 * Test things basically work.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  Configuration conf = HBaseConfiguration.create();
  LocalHBaseCluster cluster = new LocalHBaseCluster(conf);
  cluster.startup();
  Admin admin = new HBaseAdmin(conf);
  try {
    HTableDescriptor htd =
      new HTableDescriptor(TableName.valueOf(cluster.getClass().getName()));
    admin.createTable(htd);
  } finally {
    admin.close();
  }
  cluster.shutdown();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: LocalHBaseCluster.java

Example 9: doBulkLoad

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table)
  throws TableNotFoundException, IOException
{
  Admin admin = null;
  Table t = table;
  Connection conn = table.getConnection();
  boolean closeConnWhenFinished = false;
  try {
    if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
      LOG.warn("managed connection cannot be used for bulkload. Creating unmanaged connection.");
      // can only use unmanaged connections from here on out.
      conn = ConnectionFactory.createConnection(table.getConfiguration());
      t = conn.getTable(table.getName());
      closeConnWhenFinished = true;
      if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
        throw new RuntimeException("Failed to create unmanaged connection.");
      }
      admin = conn.getAdmin();
    } else {
      admin = conn.getAdmin();
    }
    try (RegionLocator rl = conn.getRegionLocator(t.getName())) {
      doBulkLoad(hfofDir, admin, t, rl);
    }
  } finally {
    if (admin != null) admin.close();
    if (closeConnWhenFinished) {
      t.close();
      conn.close();
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 43, Source: LoadIncrementalHFiles.java

Example 10: merge

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
/**
 * Scans the table and merges two adjacent regions if they are small. This
 * only happens when a lot of rows are deleted.
 *
 * When merging the hbase:meta region, the HBase instance must be offline.
 * When merging a normal table, the HBase instance must be online, but the
 * table must be disabled.
 *
 * @param conf        - configuration object for HBase
 * @param fs          - FileSystem where regions reside
 * @param tableName   - Table to be compacted
 * @param testMasterRunning True if we are to verify master is down before
 * running merge
 * @throws IOException
 */
public static void merge(Configuration conf, FileSystem fs,
  final TableName tableName, final boolean testMasterRunning)
throws IOException {
  boolean masterIsRunning = false;
  if (testMasterRunning) {
    masterIsRunning = HConnectionManager
        .execute(new HConnectable<Boolean>(conf) {
          @Override
          public Boolean connect(HConnection connection) throws IOException {
            return connection.isMasterRunning();
          }
        });
  }
  if (tableName.equals(TableName.META_TABLE_NAME)) {
    if (masterIsRunning) {
      throw new IllegalStateException(
          "Can not compact hbase:meta table if instance is on-line");
    }
    // TODO reenable new OfflineMerger(conf, fs).process();
  } else {
    if(!masterIsRunning) {
      throw new IllegalStateException(
          "HBase instance must be running to merge a normal table");
    }
    Admin admin = new HBaseAdmin(conf);
    try {
      if (!admin.isTableDisabled(tableName)) {
        throw new TableNotDisabledException(tableName);
      }
    } finally {
      admin.close();
    }
    new OnlineMerger(conf, fs, tableName).process();
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 51, Source: HMerge.java

Example 11: createPresplitTable

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo,
        String[] columnFamilies, Configuration conf)
throws IOException, InterruptedException {
  final int splitCount = conf.getInt("split.count", 0);
  Preconditions.checkArgument(splitCount > 1, "Split count must be > 1");

  Preconditions.checkArgument(columnFamilies.length > 0,
      "Must specify at least one column family. ");
  LOG.debug("Creating table " + tableName + " with " + columnFamilies.length
      + " column families.  Presplitting to " + splitCount + " regions");

  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (String cf : columnFamilies) {
    desc.addFamily(new HColumnDescriptor(Bytes.toBytes(cf)));
  }
  try (Connection connection = ConnectionFactory.createConnection(conf)) {
    Admin admin = connection.getAdmin();
    try {
      Preconditions.checkArgument(!admin.tableExists(tableName),
        "Table already exists: " + tableName);
      admin.createTable(desc, splitAlgo.split(splitCount));
    } finally {
      admin.close();
    }
    LOG.debug("Table created!  Waiting for regions to show online in META...");
    if (!conf.getBoolean("split.verify", true)) {
      // NOTE: createTable is synchronous on the table, but not on the regions
      int onlineRegions = 0;
      while (onlineRegions < splitCount) {
        onlineRegions = MetaTableAccessor.getRegionCount(connection, tableName);
        LOG.debug(onlineRegions + " of " + splitCount + " regions online...");
        if (onlineRegions < splitCount) {
          Thread.sleep(10 * 1000); // sleep
        }
      }
    }
    LOG.debug("Finished creating table with " + splitCount + " regions");
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 40, Source: RegionSplitter.java

Example 12: testTableDeletion

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
@Test (timeout=180000)
public void testTableDeletion() throws Exception {
  User TABLE_ADMIN = User.createUserForTesting(conf, "TestUser", new String[0]);
  final TableName tname = TableName.valueOf("testTableDeletion");
  createTestTable(tname);

  // Grant TABLE ADMIN privs
  grantOnTable(TEST_UTIL, TABLE_ADMIN.getShortName(), tname, null, null, Permission.Action.ADMIN);

  AccessTestAction deleteTableAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Connection unmanagedConnection =
          ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Admin admin = unmanagedConnection.getAdmin();
      try {
        deleteTable(TEST_UTIL, admin, tname);
      } finally {
        admin.close();
        unmanagedConnection.close();
      }
      return null;
    }
  };

  verifyDenied(deleteTableAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
    USER_GROUP_WRITE);
  verifyAllowed(deleteTableAction, TABLE_ADMIN);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 30, Source: TestAccessController.java

Example 13: setupBeforeClass

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set configure to indicate which cp should be loaded
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName(),
      ColumnAggregationEndpointWithErrors.class.getName(),
      ColumnAggregationEndpointNullResponse.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);
  Admin admin = new HBaseAdmin(conf);
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);
  admin.close();

  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 28, Source: TestBatchCoprocessorEndpoint.java

Example 14: listReplicated

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
/**
 * Find all column families that are replicated from this cluster
 * @return the full list of the replicated column families of this cluster as:
 *        tableName, family name, replicationType
 *
 * Currently replicationType is Global. In the future, more replication
 * types may be extended here. For example
 *  1) the replication may only apply to selected peers instead of all peers
 *  2) the replicationType may indicate the host Cluster servers as Slave
 *     for the table:columnFam.
 */
public List<HashMap<String, String>> listReplicated() throws IOException {
  List<HashMap<String, String>> replicationColFams = new ArrayList<HashMap<String, String>>();

  Admin admin = connection.getAdmin();
  HTableDescriptor[] tables;
  try {
    tables = admin.listTables();
  } finally {
    if (admin != null) admin.close();
  }

  for (HTableDescriptor table : tables) {
    HColumnDescriptor[] columns = table.getColumnFamilies();
    String tableName = table.getNameAsString();
    for (HColumnDescriptor column : columns) {
      if (column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) {
        // At this moment, the column family is replicated to all peers
        HashMap<String, String> replicationEntry = new HashMap<String, String>();
        replicationEntry.put(TNAME, tableName);
        replicationEntry.put(CFNAME, column.getNameAsString());
        replicationEntry.put(REPLICATIONTYPE, REPLICATIONGLOBAL);
        replicationColFams.add(replicationEntry);
      }
    }
  }

  return replicationColFams;
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 40, Source: ReplicationAdmin.java

Example 15: setTableRep

import org.apache.hadoop.hbase.client.Admin; // import the package/class this method depends on
/**
 * Set the table's replication switch if the table's replication switch is already not set.
 * @param tableName name of the table
 * @param isRepEnabled is replication switch enable or disable
 * @throws IOException if a remote or network exception occurs
 */
private void setTableRep(final TableName tableName, boolean isRepEnabled) throws IOException {
  Admin admin = null;
  try {
    admin = this.connection.getAdmin();
    HTableDescriptor htd = admin.getTableDescriptor(tableName);
    if (isTableRepEnabled(htd) ^ isRepEnabled) {
      boolean isOnlineSchemaUpdateEnabled =
          this.connection.getConfiguration()
              .getBoolean("hbase.online.schema.update.enable", true);
      if (!isOnlineSchemaUpdateEnabled) {
        admin.disableTable(tableName);
      }
      for (HColumnDescriptor hcd : htd.getFamilies()) {
        hcd.setScope(isRepEnabled ? HConstants.REPLICATION_SCOPE_GLOBAL
            : HConstants.REPLICATION_SCOPE_LOCAL);
      }
      admin.modifyTable(tableName, htd);
      if (!isOnlineSchemaUpdateEnabled) {
        admin.enableTable(tableName);
      }
    }
  } finally {
    if (admin != null) {
      try {
        admin.close();
      } catch (IOException e) {
        LOG.warn("Failed to close admin connection.");
        LOG.debug("Details on failure to close admin connection.", e);
      }
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 39, Source: ReplicationAdmin.java


Note: The org.apache.hadoop.hbase.client.Admin.close method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not republish without permission.