

Java HBaseConfiguration.createClusterConf Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseConfiguration.createClusterConf. If you are unsure what HBaseConfiguration.createClusterConf does or how to call it, the selected examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseConfiguration.


The following shows 7 code examples of the HBaseConfiguration.createClusterConf method, sorted by popularity by default.
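Before diving into the examples, a minimal sketch of the basic call may be useful. The cluster key "zk1,zk2,zk3:2181:/hbase" below is illustrative, not taken from the examples; the general format is <quorum hosts>:<client port>:<znode parent>, as Example 5 demonstrates.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class CreateClusterConfSketch {
  public static void main(String[] args) throws IOException {
    Configuration base = HBaseConfiguration.create();

    // Cluster key of the target cluster: "<quorum hosts>:<client port>:<znode parent>"
    // (illustrative values).
    String clusterKey = "zk1,zk2,zk3:2181:/hbase";

    // Returns a copy of `base` with the ZooKeeper quorum, client port and
    // znode parent taken from the cluster key.
    Configuration remote = HBaseConfiguration.createClusterConf(base, clusterKey);

    // The resulting configuration carries the ZooKeeper settings parsed from the key.
    System.out.println(remote.get(HConstants.ZOOKEEPER_QUORUM));
    System.out.println(remote.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1)); // 2181
    System.out.println(remote.get(HConstants.ZOOKEEPER_ZNODE_PARENT));       // /hbase
  }
}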

Example 1: setConf

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Override
public void setConf(Configuration otherConf) {
  String tableName = otherConf.get(OUTPUT_TABLE);
  if(tableName == null || tableName.length() <= 0) {
    throw new IllegalArgumentException("Must specify table name");
  }

  String address = otherConf.get(QUORUM_ADDRESS);
  int zkClientPort = otherConf.getInt(QUORUM_PORT, 0);
  String serverClass = otherConf.get(REGION_SERVER_CLASS);
  String serverImpl = otherConf.get(REGION_SERVER_IMPL);

  try {
    this.conf = HBaseConfiguration.createClusterConf(otherConf, address, OUTPUT_CONF_PREFIX);

    if (serverClass != null) {
      this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl);
    }
    if (zkClientPort != 0) {
      this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
    }
  } catch(IOException e) {
    LOG.error(e);
    throw new RuntimeException(e);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: TableOutputFormat.java
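For context, a job driver has to place these keys into the configuration before TableOutputFormat.setConf() runs. The sketch below assumes the stock HBase 1.x constants OUTPUT_TABLE and QUORUM_ADDRESS on org.apache.hadoop.hbase.mapreduce.TableOutputFormat; the table name and cluster key are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.mapreduce.Job;

public class RemoteOutputDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "write-to-remote-hbase");

    // Target table plus the remote cluster's ZooKeeper cluster key (illustrative values).
    job.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, "my_table");
    job.getConfiguration().set(TableOutputFormat.QUORUM_ADDRESS,
        "remote-zk1,remote-zk2:2181:/hbase");

    // setConf() above passes QUORUM_ADDRESS to createClusterConf() and also applies
    // any OUTPUT_CONF_PREFIX-prefixed overrides found in the job configuration.
    job.setOutputFormatClass(TableOutputFormat.class);
    // ... set mapper, input format, etc., then submit the job.
  }
}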

Example 2: getPeerConf

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Override
public Pair<ReplicationPeerConfig, Configuration> getPeerConf(String peerId)
    throws ReplicationException {
  ReplicationPeerConfig peerConfig = getReplicationPeerConfig(peerId);

  if (peerConfig == null) {
    return null;
  }

  Configuration otherConf;
  try {
    otherConf = HBaseConfiguration.createClusterConf(this.conf, peerConfig.getClusterKey());
  } catch (IOException e) {
    LOG.error("Can't get peer configuration for peerId=" + peerId + " because:", e);
    return null;
  }

  if (!peerConfig.getConfiguration().isEmpty()) {
    CompoundConfiguration compound = new CompoundConfiguration();
    compound.add(otherConf);
    compound.addStringMap(peerConfig.getConfiguration());
    return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, compound);
  }

  return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, otherConf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 27, Source: ReplicationPeersZKImpl.java

Example 3: openConnection

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
private static Connection openConnection(Configuration conf, String zkClusterConfKey,
                                         String configPrefix)
  throws IOException {
    String zkCluster = conf.get(zkClusterConfKey);
    Configuration clusterConf = HBaseConfiguration.createClusterConf(conf,
        zkCluster, configPrefix);
    return ConnectionFactory.createConnection(clusterConf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 9, Source: SyncTable.java

Example 4: initCredentials

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
public static void initCredentials(Job job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.getConfiguration().set("mapreduce.job.credentials.binary",
                                 System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      // init credentials for remote cluster
      String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
      User user = userProvider.getCurrent();
      if (quorumAddress != null) {
        Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
            quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
        Connection peerConn = ConnectionFactory.createConnection(peerConf);
        try {
          TokenUtil.addTokenForJob(peerConn, user, job);
        } finally {
          peerConn.close();
        }
      }

      Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
      try {
        TokenUtil.addTokenForJob(conn, user, job);
      } finally {
        conn.close();
      }
    } catch (InterruptedException ie) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt();
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 39, Source: TableMapReduceUtil.java

Example 5: testKey

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
private void testKey(String ensemble, int port, String znode, Boolean multiplePortSupport)
    throws IOException {
  Configuration conf = new Configuration();
  String key = ensemble+":"+port+":"+znode;
  String ensemble2 = null;
  ZKConfig.ZKClusterKey zkClusterKey = ZKConfig.transformClusterKey(key);
  if (multiplePortSupport) {
    ensemble2 = ZKConfig.standardizeZKQuorumServerString(ensemble,
        Integer.toString(port));
    assertEquals(ensemble2, zkClusterKey.getQuorumString());
  }
  else {
    assertEquals(ensemble, zkClusterKey.getQuorumString());
  }
  assertEquals(port, zkClusterKey.getClientPort());
  assertEquals(znode, zkClusterKey.getZnodeParent());

  conf = HBaseConfiguration.createClusterConf(conf, key);
  assertEquals(zkClusterKey.getQuorumString(), conf.get(HConstants.ZOOKEEPER_QUORUM));
  assertEquals(zkClusterKey.getClientPort(), conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1));
  assertEquals(zkClusterKey.getZnodeParent(), conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));

  String reconstructedKey = ZKConfig.getZooKeeperClusterKey(conf);
  if (multiplePortSupport) {
    String key2 = ensemble2 + ":" + port + ":" + znode;
    assertEquals(key2, reconstructedKey);
  }
  else {
    assertEquals(key, reconstructedKey);
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 32, Source: TestZKConfig.java

Example 6: runCopyJob

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
/**
 * Run Map-Reduce Job to perform the files copy.
 */
private void runCopyJob(final Path inputRoot, final Path outputRoot,
    final String snapshotName, final Path snapshotDir, final boolean verifyChecksum,
    final String filesUser, final String filesGroup, final int filesMode,
    final int mappers, final int bandwidthMB)
        throws IOException, InterruptedException, ClassNotFoundException {
  Configuration conf = getConf();
  if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup);
  if (filesUser != null) conf.set(CONF_FILES_USER, filesUser);
  if (mappers > 0) {
    conf.setInt(CONF_NUM_SPLITS, mappers);
    conf.setInt(MR_NUM_MAPS, mappers);
  }
  conf.setInt(CONF_FILES_MODE, filesMode);
  conf.setBoolean(CONF_CHECKSUM_VERIFY, verifyChecksum);
  conf.set(CONF_OUTPUT_ROOT, outputRoot.toString());
  conf.set(CONF_INPUT_ROOT, inputRoot.toString());
  conf.setInt(CONF_BANDWIDTH_MB, bandwidthMB);
  conf.set(CONF_SNAPSHOT_NAME, snapshotName);
  conf.set(CONF_SNAPSHOT_DIR, snapshotDir.toString());

  Job job = new Job(conf);
  job.setJobName("ExportSnapshot-" + snapshotName);
  job.setJarByClass(ExportSnapshot.class);
  TableMapReduceUtil.addDependencyJars(job);
  job.setMapperClass(ExportMapper.class);
  job.setInputFormatClass(ExportSnapshotInputFormat.class);
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setMapSpeculativeExecution(false);
  job.setNumReduceTasks(0);

  // Acquire the delegation Tokens
  Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX);
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
    new Path[] { inputRoot }, srcConf);
  Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX);
  TokenCache.obtainTokensForNamenodes(job.getCredentials(),
      new Path[] { outputRoot }, destConf);

  // Run the MR Job
  if (!job.waitForCompletion(true)) {
    // TODO: Replace the fixed string with job.getStatus().getFailureInfo()
    // when it will be available on all the supported versions.
    throw new ExportSnapshotException("Copy Files Map-Reduce Job failed");
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 49, Source: ExportSnapshot.java
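A note on the null cluster key used for srcConf and destConf above: when the cluster key is null, createClusterConf() only merges configuration entries whose names start with the given prefix, stripping that prefix. Below is a small sketch of that behavior using a hypothetical "my.dest." prefix and key rather than ExportSnapshot's real prefixes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PrefixOverrideSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical prefixed override for the destination cluster.
    conf.set("my.dest.fs.defaultFS", "hdfs://backup-cluster:8020");

    // No cluster key: only the "my.dest."-prefixed entries are applied, prefix stripped.
    Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, "my.dest.");

    System.out.println(destConf.get("fs.defaultFS")); // hdfs://backup-cluster:8020
  }
}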

Example 7: initCredentialsForCluster

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * The quorumAddress is the key to the ZK ensemble, which contains:
 * hbase.zookeeper.quorum, hbase.zookeeper.client.port and
 * zookeeper.znode.parent
 *
 * @param job The job that requires the permission.
 * @param quorumAddress string that contains the 3 required configurations
 * @throws IOException When the authentication token cannot be obtained.
 * @deprecated Since 1.2.0, use {@link #initCredentialsForCluster(Job, Configuration)} instead.
 */
@Deprecated
public static void initCredentialsForCluster(Job job, String quorumAddress)
    throws IOException {
  Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
      quorumAddress);
  initCredentialsForCluster(job, peerConf);
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 21, Source: TableMapReduceUtil.java
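Since this overload is deprecated, a caller on 1.2.0+ would typically build the peer configuration itself and pass it to the Configuration-based method, as the deprecation note suggests. A minimal sketch with an illustrative cluster key:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class PeerCredentialsSketch {
  public static void initPeerCredentials(Job job) throws IOException {
    // Build the peer cluster's configuration from its ZooKeeper cluster key
    // (illustrative), then use the non-deprecated overload.
    Configuration peerConf = HBaseConfiguration.createClusterConf(
        job.getConfiguration(), "peer-zk1,peer-zk2:2181:/hbase");
    TableMapReduceUtil.initCredentialsForCluster(job, peerConf);
  }
}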


Note: The org.apache.hadoop.hbase.HBaseConfiguration.createClusterConf method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their developers, and the source code copyright belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.