

Java HBaseConfiguration Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.HBaseConfiguration. If you have been wondering what exactly HBaseConfiguration does, how to use it, or what working code looks like, the hand-picked class examples below should help.


The HBaseConfiguration class belongs to the org.apache.hadoop.hbase package. The 15 code examples below show it in context, ordered by popularity by default.
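
As a warm-up, here is a minimal, self-contained sketch of the most common pattern. It assumes hbase-site.xml is on the classpath; the ZooKeeper hosts are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuickStart {
  public static void main(String[] args) {
    // create() layers hbase-default.xml and hbase-site.xml from the classpath
    // on top of the usual Hadoop configuration resources.
    Configuration conf = HBaseConfiguration.create();
    // Individual keys can still be overridden programmatically.
    conf.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com"); // placeholder hosts
    System.out.println("quorum = " + conf.get("hbase.zookeeper.quorum"));
  }
}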

Example 1: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
public static void main(String[] argc) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.addResource(new Path("file:///", System.getProperty("oozie.action.conf.xml")));

  if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
    conf.set("mapreduce.job.credentials.binary",
             System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
  }

  Connection connection = ConnectionFactory.createConnection(conf);
  Admin admin = connection.getAdmin();

  System.out.println("Compacting table " + argc[0]);
  TableName tableName = TableName.valueOf(argc[0]);
  admin.majorCompact(tableName);
  while ("MAJOR".equals(admin.getCompactionState(tableName).toString())) {
    TimeUnit.SECONDS.sleep(10);
    System.out.println("Compacting table " + argc[0]);
  }
  System.out.println("Done compacting table " + argc[0]);
}
 
Author: cbaenziger, Project: Oozie_MajorCompaction_Example, Lines: 22, Source: MajorCompaction.java
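
Note: the loop compares the compaction state's string form with equals() rather than ==, because == on strings tests reference identity in Java and would not match a freshly built string. Longer-lived code should also close the Admin and Connection once compaction finishes.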

Example 2: open

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
/**
 * Connect to HBase.
 */
public static void open() {
	try {
		config = HBaseConfiguration.create();
		conn = HConnectionManager.createConnection(config);
		admin = new HBaseAdmin(conn);
		hbase_table = conn.getTable(ISAXIndex.TABLE_NAME);
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Author: ItGql, Project: SparkIsax, Lines: 14, Source: HBaseUtils.java
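
HConnectionManager and the HBaseAdmin constructor used above are deprecated in the HBase 1.x client API and removed in 2.x. A rough modern equivalent, as a sketch (the table name string is a placeholder standing in for ISAXIndex.TABLE_NAME):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class HBaseUtilsSketch {
  private static Configuration config;
  private static Connection conn;
  private static Admin admin;
  private static Table hbaseTable;

  /** Connect to HBase using the non-deprecated client entry points. */
  public static void open() {
    try {
      config = HBaseConfiguration.create();
      conn = ConnectionFactory.createConnection(config); // replaces HConnectionManager.createConnection
      admin = conn.getAdmin();                           // replaces new HBaseAdmin(conn)
      hbaseTable = conn.getTable(TableName.valueOf("isax_index")); // placeholder table name
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}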

Example 3: HBasePStoreProvider

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
public HBasePStoreProvider(PStoreRegistry registry) {
  @SuppressWarnings("unchecked")
  Map<String, Object> config = (Map<String, Object>) registry.getConfig().getAnyRef(DrillHBaseConstants.SYS_STORE_PROVIDER_HBASE_CONFIG);
  this.hbaseConf = HBaseConfiguration.create();
  this.hbaseConf.set(HConstants.HBASE_CLIENT_INSTANCE_ID, "drill-hbase-persistent-store-client");
  if (config != null) {
    for (Map.Entry<String, Object> entry : config.entrySet()) {
      this.hbaseConf.set(entry.getKey(), String.valueOf(entry.getValue()));
    }
  }
  this.storeTableName = registry.getConfig().getString(DrillHBaseConstants.SYS_STORE_PROVIDER_HBASE_TABLE);

  ClusterCoordinator coord = registry.getClusterCoordinator();
  if ((coord instanceof ZKClusterCoordinator)) {
    this.localEStoreProvider = null;
    this.zkEStoreProvider = new ZkEStoreProvider(((ZKClusterCoordinator)registry.getClusterCoordinator()).getCurator());
    this.zkAvailable = true;
  } else {
    this.localEStoreProvider = new LocalEStoreProvider();
    this.zkEStoreProvider = null;
    this.zkAvailable = false;
  }

}
 
Author: skhalifa, Project: QDrill, Lines: 25, Source: HBasePStoreProvider.java

Example 4: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
/**
 * Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer.
 * @param args String[] of command line arguments. Not used.
 */
public static void main(String[] args) {
  Configuration conf = HBaseConfiguration.create();
  try {
    Properties zkProperties = ZKConfig.makeZKProps(conf);
    writeMyID(zkProperties);
    QuorumPeerConfig zkConfig = new QuorumPeerConfig();
    zkConfig.parseProperties(zkProperties);

    // login the zookeeper server principal (if using security)
    ZKUtil.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE,
      HConstants.ZK_SERVER_KERBEROS_PRINCIPAL,
      zkConfig.getClientPortAddress().getHostName());

    runZKServer(zkConfig);
  } catch (Exception e) {
    e.printStackTrace();
    System.exit(-1);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 24, Source: HQuorumPeer.java

Example 5: testRegionObserverFlushTimeStacking

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
@Test
public void testRegionObserverFlushTimeStacking() throws Exception {
  byte[] ROW = Bytes.toBytes("testRow");
  byte[] TABLE = Bytes.toBytes(getClass().getName());
  byte[] A = Bytes.toBytes("A");
  byte[][] FAMILIES = new byte[][] { A };

  Configuration conf = HBaseConfiguration.create();
  Region region = initHRegion(TABLE, getClass().getName(), conf, FAMILIES);
  RegionCoprocessorHost h = region.getCoprocessorHost();
  h.load(NoDataFromFlush.class, Coprocessor.PRIORITY_HIGHEST, conf);
  h.load(EmptyRegionObsever.class, Coprocessor.PRIORITY_USER, conf);

  // put a row and flush it to disk
  Put put = new Put(ROW);
  put.add(A, A, A);
  region.put(put);
  region.flush(true);
  Get get = new Get(ROW);
  Result r = region.get(get);
  assertNull(
    "Got an unexpected number of rows - no data should be returned with the NoDataFromFlush coprocessor. Found: "
        + r, r.listCells());
}
 
Author: fengchen8086, Project: ditb, Lines: 25, Source: TestRegionObserverScannerOpenHook.java
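
Design note: coprocessor hooks run in ascending priority-value order, so NoDataFromFlush (PRIORITY_HIGHEST, the smallest value) fires before EmptyRegionObsever (PRIORITY_USER); the test checks that stacking an inert observer behind the flush-filtering one does not change the observed result.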

Example 6: TestScanRow

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
public TestScanRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");
    columnName = "col_1";
    columnValue = "col_1_var";

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Author: aliyun, Project: aliyun-tablestore-hbase-client, Lines: 17, Source: TestScanRow.java

Example 7: TestDeleteRow

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
public TestDeleteRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    family = config.get("hbase.client.tablestore.family");
    columnName = "col_1";
    columnValue = "col_1_var";

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Author: aliyun, Project: aliyun-tablestore-hbase-client, Lines: 17, Source: TestDeleteRow.java

Example 8: TestBatchRow

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
public TestBatchRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    family = config.get("hbase.client.tablestore.family");
    columnName = "col_1";
    columnValue = "col_1_var";

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Author: aliyun, Project: aliyun-tablestore-hbase-client, Lines: 17, Source: TestBatchRow.java
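
Examples 6, 7, and 8 share one setup pattern: the column family and table name come from the aliyun-tablestore-specific keys hbase.client.tablestore.family and hbase.client.tablestore.table, the table is created on first use, and the one-second sleep is a crude wait for the freshly created table to come online before the test body runs.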

Example 9: init

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
public void init(String additionalConf) throws IOException {
  configuration = HBaseConfiguration.create();
  HRegionServer.loadWinterConf(configuration, additionalConf);
  conn = ConnectionFactory.createConnection(configuration);
  IndexTableRelation relation;
  if (IndexType.isUserDefinedIndex(indexType))
    relation = getUserDefinedIndexTableRelation(tableName, indexType);
  else relation = getRegularIndexTableRelation(tableName, indexType);
  admin = new IndexTableAdmin(configuration, conn, relation);
  if (indexType == IndexType.LCIndex) admin.setLCIndexRange(getLCIndexRangeStr());
  //    admin.createTable(false, false);

  byte[][] splits = new byte[10][];
  for (int i = 0; i < 10; i++) {
    splits[i] = Bytes.toBytes(i * 1000);
  }
  admin.createTable(true, true, splits);
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: BaseRunner.java

Example 10: InfoServer

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
/**
 * Create a status server on the given port.
 * The jsp scripts are taken from src/hbase-webapps/<code>name</code>.
 * @param name The name of the server
 * @param bindAddress address to bind to
 * @param port The port to use on the server
 * @param findPort whether the server should start at the given port and
 * increment by 1 until it finds a free port.
 * @param c the Configuration the server reads its SSL and app settings from
 * @throws IOException e
 */
public InfoServer(String name, String bindAddress, int port, boolean findPort,
    final Configuration c)
throws IOException {
  HttpConfig httpConfig = new HttpConfig(c);
  HttpServer.Builder builder = new org.apache.hadoop.hbase.http.HttpServer.Builder();

  builder.setName(name)
      .addEndpoint(URI.create(httpConfig.getSchemePrefix() + bindAddress + ":" + port))
      .setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c);
  String logDir = System.getProperty("hbase.log.dir");
  if (logDir != null) {
    builder.setLogDir(logDir);
  }
  if (httpConfig.isSecure()) {
    builder.keyPassword(HBaseConfiguration.getPassword(c, "ssl.server.keystore.keypassword", null))
        .keyStore(c.get("ssl.server.keystore.location"),
            HBaseConfiguration.getPassword(c, "ssl.server.keystore.password", null),
            c.get("ssl.server.keystore.type", "jks"))
        .trustStore(c.get("ssl.server.truststore.location"),
            HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null),
            c.get("ssl.server.truststore.type", "jks"));
  }
  this.httpServer = builder.build();
}
 
Author: fengchen8086, Project: ditb, Lines: 36, Source: InfoServer.java
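
A sketch of starting such a status server, inside a method that declares throws IOException (the name, bind address, and port are illustrative; 16010 is the conventional HBase master UI port):

Configuration c = HBaseConfiguration.create();
// Bind the web UI to 0.0.0.0:16010; with findPort=false the server fails
// instead of probing upward for a free port.
InfoServer infoServer = new InfoServer("master", "0.0.0.0", 16010, false, c);
infoServer.start();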

Example 11: initTableReduceJob

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
/**
 * Use this before submitting a TableReduce job. It will
 * appropriately set up the JobConf.
 *
 * @param table  The output table.
 * @param reducer  The reducer class to use.
 * @param job  The current job configuration to adjust.
 * @param partitioner  Partitioner to use. Pass <code>null</code> to use
 * default partitioner.
 * @param addDependencyJars upload HBase jars and jars for any of the configured
 *           job classes via the distributed cache (tmpjars).
 * @throws IOException When determining the region count fails.
 */
public static void initTableReduceJob(String table,
  Class<? extends TableReduce> reducer, JobConf job, Class partitioner,
  boolean addDependencyJars) throws IOException {
  job.setOutputFormat(TableOutputFormat.class);
  job.setReducerClass(reducer);
  job.set(TableOutputFormat.OUTPUT_TABLE, table);
  job.setOutputKeyClass(ImmutableBytesWritable.class);
  job.setOutputValueClass(Put.class);
  job.setStrings("io.serializations", job.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName());
  if (partitioner == HRegionPartitioner.class) {
    job.setPartitionerClass(HRegionPartitioner.class);
    int regions =
      MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
    if (job.getNumReduceTasks() > regions) {
      job.setNumReduceTasks(regions);
    }
  } else if (partitioner != null) {
    job.setPartitionerClass(partitioner);
  }
  if (addDependencyJars) {
    addDependencyJars(job);
  }
  initCredentials(job);
}
 
Author: fengchen8086, Project: ditb, Lines: 39, Source: TableMapReduceUtil.java
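
A hypothetical driver wiring this helper into an old-API (org.apache.hadoop.hbase.mapred) job; MyDriver, MySummaryReducer, and the table name are placeholders:

JobConf job = new JobConf(HBaseConfiguration.create(), MyDriver.class);
job.setJobName("summarize");
// Send reduce output to the "summary" table; HRegionPartitioner caps the
// number of reduce tasks at the table's region count.
TableMapReduceUtil.initTableReduceJob("summary", MySummaryReducer.class, job,
    HRegionPartitioner.class, true);
JobClient.runJob(job);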

Example 12: testSecurityForNonSecureHadoop

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
@Test
public void testSecurityForNonSecureHadoop() {
  assertFalse("Security should be disable in non-secure Hadoop",
      User.isSecurityEnabled());

  Configuration conf = HBaseConfiguration.create();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
  assertTrue("Security should be enabled", User.isHBaseSecurityEnabled(conf));

  conf = HBaseConfiguration.create();
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  assertFalse("HBase security should not be enabled if " 
      + User.HBASE_SECURITY_CONF_KEY + " is not set accordingly",
      User.isHBaseSecurityEnabled(conf));

  conf = HBaseConfiguration.create();
  conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
  assertTrue("HBase security should be enabled regardless of underlying "
      + "HDFS settings", User.isHBaseSecurityEnabled(conf));
}
 
Author: fengchen8086, Project: ditb, Lines: 22, Source: TestUser.java
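
The three assertions isolate two independent switches: hadoop.security.authentication governs Hadoop-level Kerberos, while hbase.security.authentication (User.HBASE_SECURITY_CONF_KEY) is what isHBaseSecurityEnabled actually inspects, independently of the underlying HDFS setting.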

Example 13: setUp

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // this.cluster = TEST_UTIL.getDFSCluster();
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(conf);
  this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
  this.oldLogDir = new Path(this.hbaseRootDir,
      HConstants.HREGION_OLDLOGDIR_NAME);
  this.logDir = new Path(this.hbaseRootDir,
      DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
  this.logName = HConstants.HREGION_LOGDIR_NAME;

  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: TestWALObserver.java

Example 14: verifySplitPointScenario

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
/**
 * Verifies scenario for finding a split point.
 * @param splitPointAfter Stripe to expect the split point at/after.
 * @param shouldSplitStripe If true, the split point is expected in the middle of the above
 *                          stripe; if false, should be at the end.
 * @param splitRatioToVerify Maximum split imbalance ratio.
 * @param sizes Stripe sizes.
 */
private void verifySplitPointScenario(int splitPointAfter, boolean shouldSplitStripe,
    float splitRatioToVerify, int... sizes) throws Exception {
  assertTrue(sizes.length > 1);
  ArrayList<StoreFile> sfs = new ArrayList<StoreFile>();
  for (int sizeIx = 0; sizeIx < sizes.length; ++sizeIx) {
    byte[] startKey = (sizeIx == 0) ? OPEN_KEY : Bytes.toBytes(sizeIx - 1);
    byte[] endKey = (sizeIx == sizes.length - 1) ? OPEN_KEY : Bytes.toBytes(sizeIx);
    MockStoreFile sf = createFile(sizes[sizeIx], 0, startKey, endKey);
    sf.splitPoint = Bytes.toBytes(-sizeIx); // set split point to the negative index
    sfs.add(sf);
  }

  Configuration conf = HBaseConfiguration.create();
  if (splitRatioToVerify != 0) {
    conf.setFloat(StripeStoreConfig.MAX_REGION_SPLIT_IMBALANCE_KEY, splitRatioToVerify);
  }
  StripeStoreFileManager manager = createManager(al(), conf);
  manager.addCompactionResults(al(), sfs);
  int result = Bytes.toInt(manager.getSplitPoint());
  // Either end key and thus positive index, or "middle" of the file and thus negative index.
  assertEquals(splitPointAfter * (shouldSplitStripe ? -1 : 1), result);
}
 
Author: fengchen8086, Project: ditb, Lines: 31, Source: TestStripeStoreFileManager.java

Example 15: getPeerConf

import org.apache.hadoop.hbase.HBaseConfiguration; // import the required package/class
@Override
public Pair<ReplicationPeerConfig, Configuration> getPeerConf(String peerId)
    throws ReplicationException {
  ReplicationPeerConfig peerConfig = getReplicationPeerConfig(peerId);

  if (peerConfig == null) {
    return null;
  }

  Configuration otherConf;
  try {
    otherConf = HBaseConfiguration.createClusterConf(this.conf, peerConfig.getClusterKey());
  } catch (IOException e) {
    LOG.error("Can't get peer configuration for peerId=" + peerId + " because:", e);
    return null;
  }

  if (!peerConfig.getConfiguration().isEmpty()) {
    CompoundConfiguration compound = new CompoundConfiguration();
    compound.add(otherConf);
    compound.addStringMap(peerConfig.getConfiguration());
    return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, compound);
  }

  return new Pair<ReplicationPeerConfig, Configuration>(peerConfig, otherConf);
}
 
Author: fengchen8086, Project: ditb, Lines: 27, Source: ReplicationPeersZKImpl.java
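
Design note: when the peer carries its own key/value overrides, CompoundConfiguration layers them over the configuration derived from the cluster key without copying or mutating either source; since later additions take precedence, per-peer settings win over the base cluster configuration.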


Note: The org.apache.hadoop.hbase.HBaseConfiguration class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or reuse should follow the license of the corresponding project. Do not reproduce without permission.