

Java Connection.close Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.client.Connection.close. If you are wondering what Connection.close does, how to call it, or where to find real-world examples of it, the curated snippets below should help. You can also explore further usage examples of the containing class, org.apache.hadoop.hbase.client.Connection.


A total of 15 code examples of the Connection.close method are shown below, ordered by popularity by default.
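Before the collected examples, a minimal sketch of the close() contract itself: Connection implements java.io.Closeable, so try-with-resources is equivalent to the explicit finally { conn.close(); } blocks seen throughout this article. The table name here is a placeholder assumption.

Configuration conf = HBaseConfiguration.create();
try (Connection connection = ConnectionFactory.createConnection(conf);
     Table table = connection.getTable(TableName.valueOf("my_table"))) {
  // ... read or write through the table ...
} // close() runs automatically here, in reverse order: table first, then connection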

Example 1: initCredentials

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 24 | Source: TableMapReduceUtil.java
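For context, a hedged sketch of how a driver might call this before submitting a mapred job; the driver class, job name, and submission call are illustrative assumptions, not taken from the source above.

JobConf job = new JobConf(HBaseConfiguration.create());
job.setJarByClass(MyDriver.class);  // hypothetical driver class
job.setJobName("hbase-token-demo"); // hypothetical job name
TableMapReduceUtil.initCredentials(job); // attaches an HBase delegation token when security is enabled
JobClient.runJob(job);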

Example 2: initCredentialsForCluster

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * @param job The job that requires the permission.
 * @param conf The configuration to use in connecting to the peer cluster
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Connection peerConn = ConnectionFactory.createConnection(conf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt(); // restore the interrupt status; Thread.interrupted() would clear it
    }
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 26 | Source: TableMapReduceUtil.java
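A brief sketch of a caller, e.g. a CopyTable-style job that needs a token for a remote peer cluster; the quorum string is a placeholder assumption.

Configuration peerConf = HBaseConfiguration.createClusterConf(
    job.getConfiguration(), "peer-zk1,peer-zk2,peer-zk3:2181:/hbase"); // hypothetical peer cluster key
TableMapReduceUtil.initCredentialsForCluster(job, peerConf);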

Example 3: doWork

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
@Override
protected int doWork() throws Exception {
    Connection connection = null;
    Admin admin = null;
    try {
        connection = ConnectionFactory.createConnection(getConf());
        admin = connection.getAdmin();
        HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
        if (snapshotType != null) {
            // use snapshotType (not snapshotName) to resolve the requested type
            type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotType.toUpperCase());
        }

        admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
    } catch (Exception e) {
        return -1;
    } finally {
        if (admin != null) {
            admin.close();
        }
        if (connection != null) {
            connection.close();
        }
    }
    return 0;
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 26 | Source: CreateSnapshot.java

Example 4: fixMetaHoleOnlineAndAddReplicas

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
/**
 * Puts the specified HRegionInfo into META with replica related columns
 */
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf,
    HRegionInfo hri, Collection<ServerName> servers, int numReplicas) throws IOException {
  Connection conn = ConnectionFactory.createConnection(conf);
  try {
    Table meta = conn.getTable(TableName.META_TABLE_NAME);
    try {
      Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
      if (numReplicas > 1) {
        Random r = new Random();
        ServerName[] serversArr = servers.toArray(new ServerName[servers.size()]);
        for (int i = 1; i < numReplicas; i++) {
          ServerName sn = serversArr[r.nextInt(serversArr.length)];
          // the column added here is just to make sure the master is able to
          // see the additional replicas when it is asked to assign. The
          // final value of these columns will be different and will be updated
          // by the actual regionservers that start hosting the respective replicas
          MetaTableAccessor.addLocation(put, sn, sn.getStartcode(), -1, i);
        }
      }
      meta.put(put);
    } finally {
      meta.close(); // close the table even if the put fails
    }
  } finally {
    conn.close(); // release the connection on all paths
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 25 | Source: HBaseFsckRepair.java

Example 5: testUseExistingToken

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
@Test
public void testUseExistingToken() throws Exception {
  User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "testuser2",
      new String[]{"testgroup"});
  Token<AuthenticationTokenIdentifier> token =
      secretManager.generateToken(user.getName());
  assertNotNull(token);
  user.addToken(token);

  // make sure we got a token
  Token<AuthenticationTokenIdentifier> firstToken =
      new AuthenticationTokenSelector().selectToken(token.getService(), user.getTokens());
  assertNotNull(firstToken);
  assertEquals(token, firstToken);

  Connection conn = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
  try {
    assertFalse(TokenUtil.addTokenIfMissing(conn, user));
    // make sure we still have the same token
    Token<AuthenticationTokenIdentifier> secondToken =
        new AuthenticationTokenSelector().selectToken(token.getService(), user.getTokens());
    assertEquals(firstToken, secondToken);
  } finally {
    conn.close();
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 27 | Source: TestTokenAuthentication.java

Example 6: shutdown

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
public void shutdown() {
    try {
        final Connection connection = getConnection();
        if (connection != null) {
            connection.close();
        }
    } catch (final IOException e) {
        throw new HBaseException("Unable to close HBase connection", e);
    }
}
 
Author: gchq | Project: stroom-stats | Lines of code: 11 | Source: HBaseConnection.java

Example 7: obtainAndCacheToken

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
/**
 * Obtain an authentication token for the given user and add it to the
 * user's credentials.
 * @param conf The configuration for connecting to the cluster
 * @param user The user for whom to obtain the token
 * @throws IOException If making a remote call to the authentication service fails
 * @throws InterruptedException If executing as the given user is interrupted
 * @deprecated Replaced by {@link #obtainAndCacheToken(Connection,User)}
 */
@Deprecated
public static void obtainAndCacheToken(final Configuration conf,
                                       UserGroupInformation user)
    throws IOException, InterruptedException {
  Connection conn = ConnectionFactory.createConnection(conf);
  try {
    UserProvider userProvider = UserProvider.instantiate(conf);
    obtainAndCacheToken(conn, userProvider.create(user));
  } finally {
    conn.close();
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 22 | Source: TokenUtil.java
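As the deprecation note says, the replacement overload takes an existing Connection. A minimal sketch of the newer call path, assuming the surrounding code handles the checked IOException and InterruptedException:

Configuration conf = HBaseConfiguration.create();
try (Connection conn = ConnectionFactory.createConnection(conf)) {
  User user = UserProvider.instantiate(conf).create(UserGroupInformation.getCurrentUser());
  TokenUtil.obtainAndCacheToken(conn, user); // non-deprecated overload referenced in the javadoc above
}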

Example 8: generatePartitions

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
private void generatePartitions(Path partitionsPath) throws IOException {
  Connection connection = ConnectionFactory.createConnection(getConf());
  Pair<byte[][], byte[][]> regionKeys;
  try {
    regionKeys = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys();
  } finally {
    connection.close(); // release the connection even if fetching region keys fails
  }

  tableHash.selectPartitions(regionKeys);
  LOG.info("Writing " + tableHash.partitions.size() + " partition keys to " + partitionsPath);

  tableHash.writePartitionFile(getConf(), partitionsPath);
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 12 | Source: HashTable.java

Example 9: initCredentials

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
public static void initCredentials(Job job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.getConfiguration().set("mapreduce.job.credentials.binary",
                                 System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      // init credentials for remote cluster
      String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
      User user = userProvider.getCurrent();
      if (quorumAddress != null) {
        Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
            quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
        Connection peerConn = ConnectionFactory.createConnection(peerConf);
        try {
          TokenUtil.addTokenForJob(peerConn, user, job);
        } finally {
          peerConn.close();
        }
      }

      Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
      try {
        TokenUtil.addTokenForJob(conn, user, job);
      } finally {
        conn.close();
      }
    } catch (InterruptedException ie) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt();
    }
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 39 | Source: TableMapReduceUtil.java

Example 10: doBulkLoad

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
/**
 * Perform a bulk load of the given directory into the given
 * pre-existing table.  This method is not threadsafe.
 *
 * @param hfofDir the directory that was provided as the output path
 * of a job using HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
@SuppressWarnings("deprecation")
public void doBulkLoad(Path hfofDir, final HTable table)
  throws TableNotFoundException, IOException
{
  Admin admin = null;
  Table t = table;
  Connection conn = table.getConnection();
  boolean closeConnWhenFinished = false;
  try {
    if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
      LOG.warn("managed connection cannot be used for bulkload. Creating unmanaged connection.");
      // can only use unmanaged connections from here on out.
      conn = ConnectionFactory.createConnection(table.getConfiguration());
      t = conn.getTable(table.getName());
      closeConnWhenFinished = true;
      if (conn instanceof ClusterConnection && ((ClusterConnection) conn).isManaged()) {
        throw new RuntimeException("Failed to create unmanaged connection.");
      }
      admin = conn.getAdmin();
    } else {
      admin = conn.getAdmin();
    }
    try (RegionLocator rl = conn.getRegionLocator(t.getName())) {
      doBulkLoad(hfofDir, admin, t, rl);
    }
  } finally {
    if (admin != null) admin.close();
    if (closeConnWhenFinished) {
      t.close();
      conn.close();
    }
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 43 | Source: LoadIncrementalHFiles.java
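A sketch of driving the loader; the HFile directory and table name are placeholders, HTable appears only because the (deprecated) signature above requires it, and the checked exceptions are assumed handled by the caller.

Configuration conf = HBaseConfiguration.create();
LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
HTable table = new HTable(conf, TableName.valueOf("my_table")); // deprecated ctor, matches the parameter type above
try {
  loader.doBulkLoad(new Path("/tmp/hfile-output"), table); // directory produced by HFileOutputFormat
} finally {
  table.close();
}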

Example 11: testRegionReplicaReplication

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
public void testRegionReplicaReplication(int regionReplication) throws Exception {
  // test region replica replication. Create a table with single region, write some data
  // ensure that data is replicated to the secondary region
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationWithReplicas_"
      + regionReplication);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);
  TableName tableNameNoReplicas =
      TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
  HTU.deleteTableIfAny(tableNameNoReplicas);
  HTU.createTable(tableNameNoReplicas, HBaseTestingUtility.fam1);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableNoReplicas = connection.getTable(tableNameNoReplicas);

  try {
    // load some data to the non-replicated table
    HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtility.fam1, 6000, 7000);

    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);

    verifyReplication(tableName, regionReplication, 0, 1000);

  } finally {
    table.close();
    tableNoReplicas.close();
    HTU.deleteTableIfAny(tableNameNoReplicas);
    connection.close();
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 34 | Source: TestRegionReplicaReplicationEndpoint.java

Example 12: testRegionReplicaWithoutMemstoreReplication

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
@Test (timeout = 240000)
public void testRegionReplicaWithoutMemstoreReplication() throws Exception {
  int regionReplication = 3;
  TableName tableName = TableName.valueOf("testRegionReplicaWithoutMemstoreReplication");
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  htd.setRegionMemstoreReplication(false);
  HTU.getHBaseAdmin().createTable(htd);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  try {
    // write data to the primary. The replicas should not receive the data
    final int STEP = 100;
    for (int i = 0; i < 3; ++i) {
      final int startRow = i * STEP;
      final int endRow = (i + 1) * STEP;
      LOG.info("Writing data from " + startRow + " to " + endRow);
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, startRow, endRow);
      verifyReplication(tableName, regionReplication, startRow, endRow, false);

      // Flush the table, now the data should show up in the replicas
      LOG.info("flushing table");
      HTU.flush(tableName);
      verifyReplication(tableName, regionReplication, 0, endRow, true);
    }
  } finally {
    table.close();
    connection.close();
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 32 | Source: TestRegionReplicaReplicationEndpoint.java

Example 13: testRegionReplicaReplicationForFlushAndCompaction

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
@Test (timeout = 240000)
public void testRegionReplicaReplicationForFlushAndCompaction() throws Exception {
  // Tests a table with region replication 3. Writes some data, and causes flushes and
  // compactions. Verifies that the data is readable from the replicas. Note that this
  // does not test whether the replicas actually pick up flushed files and apply compaction
  // to their stores
  int regionReplication = 3;
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationForFlushAndCompaction");
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getHBaseAdmin().createTable(htd);

  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);

  try {
    // load the data to the table

    for (int i = 0; i < 6000; i += 1000) {
      LOG.info("Writing data from " + i + " to " + (i+1000));
      HTU.loadNumericRows(table, HBaseTestingUtility.fam1, i, i+1000);
      LOG.info("flushing table");
      HTU.flush(tableName);
      LOG.info("compacting table");
      HTU.compact(tableName, false);
    }

    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    connection.close();
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 34 | Source: TestRegionReplicaReplicationEndpoint.java

Example 14: obtainTokenForJob

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
/**
 * Obtain an authentication token on behalf of the given user and add it to
 * the credentials for the given map reduce job.
 * @param user The user for whom to obtain the token
 * @param job The job configuration in which the token should be stored
 * @throws IOException If making a remote call to the authentication service fails
 * @throws InterruptedException If executing as the given user is interrupted
 * @deprecated Replaced by {@link #obtainTokenForJob(Connection,JobConf,User)}
 */
@Deprecated
public static void obtainTokenForJob(final JobConf job,
                                     UserGroupInformation user)
    throws IOException, InterruptedException {
  Connection conn = ConnectionFactory.createConnection(job);
  try {
    UserProvider userProvider = UserProvider.instantiate(job);
    obtainTokenForJob(conn, job, userProvider.create(user));
  } finally {
    conn.close();
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 22 | Source: TokenUtil.java
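As with example 7, the deprecation note points to an overload that takes a Connection; a minimal hedged sketch, again assuming the caller handles the checked exceptions:

try (Connection conn = ConnectionFactory.createConnection(job)) {
  TokenUtil.obtainTokenForJob(conn, job, UserProvider.instantiate(job).getCurrent());
}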

Example 15: listLabels

import org.apache.hadoop.hbase.client.Connection; // import the dependent package/class
/**
 * Retrieve the list of visibility labels defined in the system.
 * @param connection The Connection instance to use.
 * @param regex  The regular expression to filter which labels are returned.
 * @return labels The list of visibility labels defined in the system.
 * @throws Throwable
 */
public static ListLabelsResponse listLabels(Connection connection, final String regex)
    throws Throwable {
  Table table = null;
  try {
    table = connection.getTable(LABELS_TABLE_NAME);
    Batch.Call<VisibilityLabelsService, ListLabelsResponse> callable =
        new Batch.Call<VisibilityLabelsService, ListLabelsResponse>() {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<ListLabelsResponse> rpcCallback =
              new BlockingRpcCallback<ListLabelsResponse>();

          public ListLabelsResponse call(VisibilityLabelsService service) throws IOException {
            ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder();
            if (regex != null) {
              // Compile the regex here to catch any regex exception earlier.
              Pattern pattern = Pattern.compile(regex);
              listAuthLabelsReqBuilder.setRegex(pattern.toString());
            }
            service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback);
            ListLabelsResponse response = rpcCallback.get();
            if (controller.failedOnException()) {
              throw controller.getFailedOn();
            }
            return response;
          }
        };
    Map<byte[], ListLabelsResponse> result =
        table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY,
          HConstants.EMPTY_BYTE_ARRAY, callable);
    return result.values().iterator().next(); // There will be exactly one region for labels
    // table and so one entry in result Map.
  }
  finally {
    if (table != null) {
      table.close();
    }
    if (connection != null) {
      connection.close();
    }
  }
}
 
Author: fengchen8086 | Project: ditb | Lines of code: 49 | Source: VisibilityClient.java
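Note that the finally block above closes the caller's connection along with the labels table, so the connection cannot be reused afterwards. A sketch of a one-shot caller written with that in mind; the null regex and the protobuf accessors are assumptions based on the generated VisibilityLabels classes, and listLabels declares Throwable, which the caller must handle.

Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
// deliberately no try-with-resources: listLabels closes this connection itself (see the finally above)
ListLabelsResponse response = VisibilityClient.listLabels(connection, null); // null regex = list all labels
for (ByteString label : response.getLabelList()) {
  System.out.println(label.toStringUtf8());
}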


Note: The org.apache.hadoop.hbase.client.Connection.close examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright remains with the original authors; for distribution and use, refer to the corresponding project's license. Do not reproduce without permission.