

Java Connection.getAdmin Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Connection.getAdmin. If you are wondering how Connection.getAdmin is used, what it is for, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.client.Connection.


The following presents 15 code examples of the Connection.getAdmin method, sorted by popularity by default.
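
Before the individual examples, here is a minimal sketch of the most common Connection.getAdmin usage pattern: create a Connection, obtain an Admin from it, perform an administrative call, and close both (try-with-resources handles the closing). The class name and the table name "my_table" are placeholders chosen for illustration and are not taken from any of the projects below; a reachable cluster and an HBase client configuration on the classpath are assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class GetAdminUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Both Connection and Admin are Closeable; try-with-resources closes them in reverse order.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // "my_table" is a placeholder table name used only for illustration.
      TableName tableName = TableName.valueOf("my_table");
      System.out.println("Table exists: " + admin.tableExists(tableName));
    }
  }
}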

Example 1: main

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
public static void main(String[] argc) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));

  if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
    conf.set("mapreduce.job.credentials.binary",
             System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
  }

  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();

  System.out.println("Compacting table " + argc[0]);
  TableName tableName = TableName.valueOf(argc[0]);
  admin.majorCompact(tableName);
  while (admin.getCompactionState(tableName).toString().equals("MAJOR")) {
    TimeUnit.SECONDS.sleep(10);
    System.out.println("Compacting table " + argc[0]);
  }
  System.out.println("Done compacting table " + argc[0]);
}
 
Author: cbaenziger, Project: Oozie_MajorCompaction_Example, Lines: 22, Source: MajorCompaction.java

Example 2: doWork

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
@Override
protected int doWork() throws Exception {
    Connection connection = null;
    Admin admin = null;
    try {
        connection = ConnectionFactory.createConnection(getConf());
        admin = connection.getAdmin();
        HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
        if (snapshotType != null) {
            type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotType.toUpperCase());
        }

        admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
    } catch (Exception e) {
        return -1;
    } finally {
        if (admin != null) {
            admin.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
    return 0;
}
 
Author: fengchen8086, Project: ditb, Lines: 26, Source: CreateSnapshot.java

Example 3: testTableNameEnumeration

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
@Test (timeout=180000)
public void testTableNameEnumeration() throws Exception {
  AccessTestAction listTablesAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Connection unmanagedConnection =
          ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Admin admin = unmanagedConnection.getAdmin();
      try {
        return Arrays.asList(admin.listTableNames());
      } finally {
        admin.close();
        unmanagedConnection.close();
      }
    }
  };

  verifyAllowed(listTablesAction, SUPERUSER, USER_ADMIN, USER_CREATE, USER_OWNER, USER_RW,
    USER_RO, USER_GROUP_CREATE, USER_GROUP_ADMIN, USER_GROUP_READ, USER_GROUP_WRITE);
  verifyIfEmptyList(listTablesAction, USER_NONE);
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: TestAccessController.java

Example 4: setupTable

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
/**
 * Creates a table with the given name and the specified number of column
 * families if the table does not already exist.
 */
private void setupTable(final Connection connection, TableName table, int cfs)
throws IOException {
  try {
    LOG.info("Creating table " + table);
    HTableDescriptor htd = new HTableDescriptor(table);
    for (int i = 0; i < cfs; i++) {
      htd.addFamily(new HColumnDescriptor(family(i)));
    }
    try (Admin admin = connection.getAdmin()) {
      admin.createTable(htd);
    }
  } catch (TableExistsException tee) {
    LOG.info("Table " + table + " already exists");
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 20, Source: TestLoadIncrementalHFilesSplitRecovery.java

Example 5: init

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
private void init() {
  logger.debug("Getting region locations");
  TableName tableName = TableName.valueOf(hbaseScanSpec.getTableName());
  Connection conn = storagePlugin.getConnection();

  try (Admin admin = conn.getAdmin();
       RegionLocator locator = conn.getRegionLocator(tableName)) {
    this.hTableDesc = admin.getTableDescriptor(tableName);
    List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
    statsCalculator = new TableStatsCalculator(conn, hbaseScanSpec, storagePlugin.getContext().getConfig(), storagePluginConfig);

    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<>();
    for (HRegionLocation regionLocation : regionLocations) {
      HRegionInfo regionInfo = regionLocation.getRegionInfo();
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, regionLocation.getServerName());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0 && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }
  } catch (IOException e) {
    throw new RuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
 
Author: dremio, Project: dremio-oss, Lines: 31, Source: HBaseGroupScan.java

Example 6: initializeTable

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
/**
 * Allows subclasses to initialize the table information.
 *
 * @param connection  The {@link Connection} to the HBase cluster. MUST be unmanaged. We will close.
 * @param tableName  The {@link TableName} of the table to process. 
 * @throws IOException 
 */
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
  if (this.table != null || this.connection != null) {
    LOG.warn("initializeTable called multiple times. Overwriting connection and table " +
        "reference; TableInputFormatBase will not close these old references when done.");
  }
  this.table = connection.getTable(tableName);
  this.regionLocator = connection.getRegionLocator(tableName);
  this.admin = connection.getAdmin();
  this.connection = connection;
}
 
Author: fengchen8086, Project: ditb, Lines: 18, Source: TableInputFormatBase.java

Example 7: doBulkLoad

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table)
  throws TableNotFoundException, IOException
{
  Admin admin = null;
  Table t = table;
  Connection conn = table.getConnection();
  boolean closeConnWhenFinished = false;
  try {
    if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
      LOG.warn("managed connection cannot be used for bulkload. Creating unmanaged connection.");
      // can only use unmanaged connections from here on out.
      conn = ConnectionFactory.createConnection(table.getConfiguration());
      t = conn.getTable(table.getName());
      closeConnWhenFinished = true;
      if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
        throw new RuntimeException("Failed to create unmanaged connection.");
      }
      admin = conn.getAdmin();
    } else {
      admin = conn.getAdmin();
    }
    try (RegionLocator rl = conn.getRegionLocator(t.getName())) {
      doBulkLoad(hfofDir, admin, t, rl);
    }
  } finally {
    if (admin != null) admin.close();
    if (closeConnWhenFinished) {
      t.close();
      conn.close();
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 43, Source: LoadIncrementalHFiles.java

Example 8: getRegionServerCount

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
/**
 * Alternative to getCurrentNrHRS, which is no longer available.
 * @param connection
 * @return Rough count of regionservers out on cluster.
 * @throws IOException 
 */
private static int getRegionServerCount(final Connection connection) throws IOException {
  try (Admin admin = connection.getAdmin()) {
    ClusterStatus status = admin.getClusterStatus();
    Collection<ServerName> servers = status.getServers();
    return servers == null || servers.isEmpty()? 0: servers.size();
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 14, Source: RegionSplitter.java

Example 9: testTableDeletion

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
@Test (timeout=180000)
public void testTableDeletion() throws Exception {
  User TABLE_ADMIN = User.createUserForTesting(conf, "TestUser", new String[0]);
  final TableName tname = TableName.valueOf("testTableDeletion");
  createTestTable(tname);

  // Grant TABLE ADMIN privs
  grantOnTable(TEST_UTIL, TABLE_ADMIN.getShortName(), tname, null, null, Permission.Action.ADMIN);

  AccessTestAction deleteTableAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Connection unmanagedConnection =
          ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
      Admin admin = unmanagedConnection.getAdmin();
      try {
        deleteTable(TEST_UTIL, admin, tname);
      } finally {
        admin.close();
        unmanagedConnection.close();
      }
      return null;
    }
  };

  verifyDenied(deleteTableAction, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ,
    USER_GROUP_WRITE);
  verifyAllowed(deleteTableAction, TABLE_ADMIN);
}
 
Author: fengchen8086, Project: ditb, Lines: 30, Source: TestAccessController.java

Example 10: getUserPermissions

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
/**
 * List all the userPermissions matching the given pattern.
 * @param connection The Connection instance to use
 * @param tableRegex The regular expression string to match against
 * @return a list of the matching UserPermissions
 * @throws Throwable
 */
public static List<UserPermission> getUserPermissions(Connection connection, String tableRegex)
    throws Throwable {
  PayloadCarryingRpcController controller
    = ((ClusterConnection) connection).getRpcControllerFactory().newController();
  List<UserPermission> permList = new ArrayList<UserPermission>();
  try (Table table = connection.getTable(ACL_TABLE_NAME)) {
    try (Admin admin = connection.getAdmin()) {
      CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW);
      BlockingInterface protocol =
          AccessControlProtos.AccessControlService.newBlockingStub(service);
      HTableDescriptor[] htds = null;
      if (tableRegex == null || tableRegex.isEmpty()) {
        permList = ProtobufUtil.getUserPermissions(controller, protocol);
      } else if (tableRegex.charAt(0) == '@') {
        String namespace = tableRegex.substring(1);
        permList = ProtobufUtil.getUserPermissions(controller, protocol,
          Bytes.toBytes(namespace));
      } else {
        htds = admin.listTables(Pattern.compile(tableRegex), true);
        for (HTableDescriptor hd : htds) {
          permList.addAll(ProtobufUtil.getUserPermissions(controller, protocol,
            hd.getTableName()));
        }
      }
    }
  }
  return permList;
}
 
Author: fengchen8086, Project: ditb, Lines: 36, Source: AccessControlClient.java

Example 11: IndexTableAdmin

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
/**
 * Initializes the admin for an existing table; the index table relation is
 * read from the table descriptor.
 *
 * @param conf the HBase configuration
 * @param conn an open, unmanaged connection to the cluster
 * @param tableName the name of the existing table
 * @throws IOException
 */
public IndexTableAdmin(Configuration conf, Connection conn, TableName tableName)
    throws IOException {
  this.conf = conf;
  this.conn = conn;
  this.admin = conn.getAdmin();
  HTableDescriptor desc = admin.getTableDescriptor(tableName);
  this.indexTableRelation = IndexTableRelation.getIndexTableRelation(desc);
  buildFromTableDesc = true;
  init();
}
 
Author: fengchen8086, Project: ditb, Lines: 20, Source: IndexTableAdmin.java

Example 12: setUp

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
/**
 * test setup
 * @throws Exception
 */
@BeforeClass
public void setUp() throws Exception {
    final String name = "testTable" + new Random().nextInt(5000);
    tableName = TableName.valueOf(name);
    final HConfig config = HConfig.newBuilder()
                                  .retryCount(5)
                                  .retryBackoff(3000)
                                  .scanBatchSize(50)
                                  .scanCacheSize(50)
                                  .zkQuorum("localhost:2181")
                                  .connectionThreads(4)
                                  .metaLookupThreads(2)
                                  .metaOperationTimeout(5000)
                                  .metricsEnabled(true)
                                  .operationTimeout(5000)
                                  .perRegionMaxTasks(20)
                                  .perServerMaxTasks(40)
                                  .rpcTimeout(7000)
                                  .scannerTimeout(20000)
                                  .threadPoolMaxTasks(100)
                                  .zkSessionTimeout(15000)
                                  .znode("/hbase")
                                  .build();
    final Connection connection = ConnectionFactory.createConnection(config.asConfiguration());

    this.testTable = connection.getTable(this.tableName);
    admin = connection.getAdmin();
    final HColumnDescriptor cfTestDesc
            = new HColumnDescriptor(TEST_CF)
            .setBloomFilterType(BloomType.ROW)
            .setCompactionCompressionType(Compression.Algorithm.SNAPPY)
            .setCompressionType(Compression.Algorithm.SNAPPY)
            .setDataBlockEncoding(DataBlockEncoding.PREFIX)
            .setVersions(1, 1);
    final HTableDescriptor descriptor
            = new HTableDescriptor(tableName)
            .setCompactionEnabled(true)
            .setDurability(Durability.SYNC_WAL)
            .addFamily(cfTestDesc);
    admin.createTable(descriptor);
}
 
Author: i-knowledge, Project: hbase-client, Lines: 46, Source: BatchIt.java

Example 13: testListNamespaces

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
@Test
public void testListNamespaces() throws Exception {
  AccessTestAction listAction = new AccessTestAction() {
    @Override
    public Object run() throws Exception {
      Connection unmanagedConnection =
          ConnectionFactory.createConnection(UTIL.getConfiguration());
      Admin admin = unmanagedConnection.getAdmin();
      try {
        return Arrays.asList(admin.listNamespaceDescriptors());
      } finally {
        admin.close();
        unmanagedConnection.close();
      }
    }
  };

  // listNamespaces         : All access*
  // * Returned list will only show what you can call getNamespaceDescriptor()

  verifyAllowed(listAction, SUPERUSER, USER_GLOBAL_ADMIN, USER_NS_ADMIN, USER_GROUP_ADMIN);

  // we have 4 namespaces: [default, hbase, TEST_NAMESPACE, TEST_NAMESPACE2]
  assertEquals(4, ((List)SUPERUSER.runAs(listAction)).size());
  assertEquals(4, ((List)USER_GLOBAL_ADMIN.runAs(listAction)).size());
  assertEquals(4, ((List)USER_GROUP_ADMIN.runAs(listAction)).size());

  assertEquals(2, ((List)USER_NS_ADMIN.runAs(listAction)).size());

  assertEquals(0, ((List)USER_GLOBAL_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List)USER_GLOBAL_WRITE.runAs(listAction)).size());
  assertEquals(0, ((List)USER_GLOBAL_READ.runAs(listAction)).size());
  assertEquals(0, ((List)USER_GLOBAL_EXEC.runAs(listAction)).size());
  assertEquals(0, ((List)USER_NS_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List)USER_NS_WRITE.runAs(listAction)).size());
  assertEquals(0, ((List)USER_NS_READ.runAs(listAction)).size());
  assertEquals(0, ((List)USER_NS_EXEC.runAs(listAction)).size());
  assertEquals(0, ((List)USER_TABLE_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List)USER_TABLE_WRITE.runAs(listAction)).size());
  assertEquals(0, ((List)USER_GROUP_CREATE.runAs(listAction)).size());
  assertEquals(0, ((List)USER_GROUP_READ.runAs(listAction)).size());
  assertEquals(0, ((List)USER_GROUP_WRITE.runAs(listAction)).size());
}
 
Author: fengchen8086, Project: ditb, Lines: 44, Source: TestNamespaceCommands.java

Example 14: main

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
public static void main(String[] args) throws Exception {
  int numRegions = Integer.parseInt(args[0]);
  long numRows = Long.parseLong(args[1]);

  HTableDescriptor htd = new HTableDescriptor(TABLENAME);
  htd.setMaxFileSize(10L * 1024 * 1024 * 1024);
  htd.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  htd.addFamily(new HColumnDescriptor(FAMILY3));

  Configuration conf = HBaseConfiguration.create();
  Connection conn = ConnectionFactory.createConnection(conf);
  Admin admin = conn.getAdmin();
  if (admin.tableExists(TABLENAME)) {
    admin.disableTable(TABLENAME);
    admin.deleteTable(TABLENAME);
  }
  if (numRegions >= 3) {
    byte[] startKey = new byte[16];
    byte[] endKey = new byte[16];
    Arrays.fill(endKey, (byte) 0xFF);
    admin.createTable(htd, startKey, endKey, numRegions);
  } else {
    admin.createTable(htd);
  }
  admin.close();

  Table table = conn.getTable(TABLENAME);
  byte[] qf = Bytes.toBytes("qf");
  Random rand = new Random();
  byte[] value1 = new byte[16];
  byte[] value2 = new byte[256];
  byte[] value3 = new byte[4096];
  for (long i = 0; i < numRows; i++) {
    Put put = new Put(Hashing.md5().hashLong(i).asBytes());
    rand.setSeed(i);
    rand.nextBytes(value1);
    rand.nextBytes(value2);
    rand.nextBytes(value3);
    put.addColumn(FAMILY1, qf, value1);
    put.addColumn(FAMILY2, qf, value2);
    put.addColumn(FAMILY3, qf, value3);
    table.put(put);
    if (i % 10000 == 0) {
      LOG.info(i + " rows put");
    }
  }
  table.close();
  conn.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 52, Source: TestPerColumnFamilyFlush.java

Example 15: isAccessControllerRunning

import org.apache.hadoop.hbase.client.Connection; // import the package/class required by the method
public static boolean isAccessControllerRunning(final Connection connection)
    throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  try (Admin admin = connection.getAdmin()) {
    return admin.isTableAvailable(ACL_TABLE_NAME);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 7, Source: AccessControlClient.java


Note: The org.apache.hadoop.hbase.client.Connection.getAdmin examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their original authors, and the copyright of the source code belongs to those authors; for redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.