

Java HBaseConfiguration.create Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.HBaseConfiguration.create. If you are wondering what HBaseConfiguration.create does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseConfiguration.


The following presents 15 code examples of the HBaseConfiguration.create method, sorted by popularity by default.
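Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects cited below) of the two HBaseConfiguration.create overloads the samples rely on: the no-argument form, which returns a fresh Configuration with hbase-default.xml and hbase-site.xml applied, and the one-argument form, which layers an existing Configuration on top of those defaults. The class name and the ZooKeeper quorum value are illustrative placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HBaseConfigurationCreateDemo {
  public static void main(String[] args) {
    // No-argument form: a new Configuration preloaded with
    // hbase-default.xml and hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder value

    // One-argument form: merge an existing Configuration into the HBase
    // defaults, so per-use tweaks do not leak back into the original.
    Configuration workerConf = HBaseConfiguration.create(conf);
    workerConf.setInt("hbase.client.retries.number", 0);

    System.out.println("quorum = " + workerConf.get("hbase.zookeeper.quorum"));
  }
}

Several of the examples below (notably Example 3's HTableMultiplexer) use exactly this copy-then-tweak pattern.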

Example 1: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
/**
 * For running a few tests of methods herein.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  int count = 1024;
  int size = 10240;
  for (String arg: args) {
    if (arg.startsWith(COUNT)) {
      count = Integer.parseInt(arg.replace(COUNT, ""));
    } else if (arg.startsWith(SIZE)) {
      size = Integer.parseInt(arg.replace(SIZE, ""));
    } else {
      usage(1);
    }
  }
  IPCUtil util = new IPCUtil(HBaseConfiguration.create());
  ((Log4JLogger)IPCUtil.LOG).getLogger().setLevel(Level.ALL);
  timerTests(util, count, size,  new KeyValueCodec(), null);
  timerTests(util, count, size,  new KeyValueCodec(), new DefaultCodec());
  timerTests(util, count, size,  new KeyValueCodec(), new GzipCodec());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 24, Source file: TestIPCUtil.java

Example 2: setUp

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // this.cluster = TEST_UTIL.getDFSCluster();
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(conf);
  this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
  this.oldLogDir = new Path(this.hbaseRootDir,
      HConstants.HREGION_OLDLOGDIR_NAME);
  this.logDir = new Path(this.hbaseRootDir,
      DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
  this.logName = HConstants.HREGION_LOGDIR_NAME;

  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 19, Source file: TestWALObserver.java

Example 3: HTableMultiplexer

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
/**
 * @param conn The HBase connection.
 * @param conf The HBase configuration
 * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
 *          each region server before dropping the request.
 */
public HTableMultiplexer(Connection conn, Configuration conf,
    int perRegionServerBufferQueueSize) {
  this.conn = (ClusterConnection) conn;
  this.pool = HTable.getDefaultExecutor(conf);
  this.retryNum = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
  this.maxKeyValueSize = HTable.getMaxKeyValueSize(conf);
  this.flushPeriod = conf.getLong(TABLE_MULTIPLEXER_FLUSH_PERIOD_MS, 100);
  int initThreads = conf.getInt(TABLE_MULTIPLEXER_INIT_THREADS, 10);
  this.executor =
      Executors.newScheduledThreadPool(initThreads,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build());

  this.workerConf = HBaseConfiguration.create(conf);
  // We do not do the retry because we need to reassign puts to different queues if regions are
  // moved.
  this.workerConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 26, Source file: HTableMultiplexer.java

Example 4: testDefaultProvider

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Test
public void testDefaultProvider() {
  Configuration conf = HBaseConfiguration.create();
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof DefaultCipherProvider);
  String algorithm =
      conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains(algorithm));
  Cipher a = Encryption.getCipher(conf, algorithm);
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof DefaultCipherProvider);
  assertEquals(a.getName(), algorithm);
  assertEquals(a.getKeyLength(), AES.KEY_LENGTH);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 15, Source file: TestCipherProvider.java

Example 5: testExistingStripesFromL0

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Test
public void testExistingStripesFromL0() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 3);
  StripeCompactionPolicy.StripeInformationProvider si = createStripes(3, KEY_A);
  verifyCompaction(
      createPolicy(conf), si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 9, Source file: TestStripeCompactionPolicy.java

Example 6: testSplitOffStripe

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Test
public void testSplitOffStripe() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // First test everything with default split count of 2, then split into more.
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2);
  Long[] toSplit = new Long[] { defaultSplitSize - 2, 1L, 1L };
  Long[] noSplit = new Long[] { defaultSplitSize - 2, 1L };
  long splitTargetSize = (long)(defaultSplitSize / defaultSplitCount);
  // Don't split if not eligible for compaction.
  StripeCompactionPolicy.StripeInformationProvider si =
      createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 2L });
  assertNull(createPolicy(conf).selectCompaction(si, al(), false));
  // Make sure everything is eligible.
  conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 500f);
  StripeCompactionPolicy policy = createPolicy(conf);
  verifyWholeStripesCompaction(policy, si, 0, 0, null, 2, splitTargetSize);
  // Add some extra stripes...
  si = createStripesWithSizes(0, 0, noSplit, noSplit, toSplit);
  verifyWholeStripesCompaction(policy, si, 2, 2, null, 2, splitTargetSize);
  // In the middle.
  si = createStripesWithSizes(0, 0, noSplit, toSplit, noSplit);
  verifyWholeStripesCompaction(policy, si, 1, 1, null, 2, splitTargetSize);
  // No split-off with different config (larger split size).
  // However, in this case some eligible stripe will just be compacted alone.
  StripeCompactionPolicy specPolicy = createPolicy(
      conf, defaultSplitSize + 1, defaultSplitCount, defaultInitialCount, false);
  verifySingleStripeCompaction(specPolicy, si, 1, null);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 29, Source file: TestStripeCompactionPolicy.java

Example 7: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
/**
 * Main entry point.
 *
 * @param args  The command line parameters.
 * @throws Exception When running the job fails.
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length < 2) {
    usage("Wrong number of arguments: " + otherArgs.length);
    System.exit(-1);
  }
  Job job = createSubmittableJob(conf, otherArgs);
  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 17, Source file: Export.java

Example 8: testCustomProvider

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Test
public void testCustomProvider() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, MyCipherProvider.class.getName());
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof MyCipherProvider);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains("TEST"));
  Cipher a = Encryption.getCipher(conf, "TEST");
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof MyCipherProvider);
  assertEquals(a.getName(), "TEST");
  assertEquals(a.getKeyLength(), 0);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 14, Source file: TestCipherProvider.java

Example 9: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 9, Source file: TestRegionObserverBypass.java

Example 10: testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Test
public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, 0.02f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.03f);
  HeapMemoryManager manager = new HeapMemoryManager(new BlockCacheStub(0),
      new MemstoreFlusherStub(0), new RegionServerStub(conf), new RegionServerAccountingStub());
  assertFalse(manager.isTunerOn());
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 11, Source file: TestHeapMemoryManager.java

Example 11: transformFile

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
private static void transformFile(Path input, Path output)
    throws IOException {
  Configuration conf = HBaseConfiguration.create();

  FileSystem inFS = input.getFileSystem(conf);
  FileSystem outFS = output.getFileSystem(conf);

  WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf);
  WALProvider.Writer out = null;

  try {
    if (!(in instanceof ReaderBase)) {
      System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName());
      return;
    }
    boolean compress = ((ReaderBase)in).hasCompression();
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out = WALFactory.createWALWriter(outFS, output, conf);

    WAL.Entry e = null;
    while ((e = in.next()) != null) out.append(e);
  } finally {
    in.close();
    if (out != null) {
      out.close();
      out = null;
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 30, Source file: Compressor.java

Example 12: TestSingleColumnValueFilter

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
public TestSingleColumnValueFilter() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Developer ID: aliyun, Project: aliyun-tablestore-hbase-client, Lines of code: 15, Source file: TestSingleColumnValueFilter.java

Example 13: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
/**
 * Main entry point.
 *
 * @param args  The command line parameters.
 * @throws Exception When running the job fails.
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length < 2) {
    usage("Wrong number of arguments: " + otherArgs.length);
    System.exit(-1);
  }
  String inputVersionString = System.getProperty(ResultSerialization.IMPORT_FORMAT_VER);
  if (inputVersionString != null) {
    conf.set(ResultSerialization.IMPORT_FORMAT_VER, inputVersionString);
  }
  Job job = createSubmittableJob(conf, otherArgs);
  boolean isJobSuccessful = job.waitForCompletion(true);
  if (isJobSuccessful) {
    // Flush all the regions of the table
    flushRegionsIfNecessary(conf);
  }
  long inputRecords = job.getCounters().findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
  long outputRecords = job.getCounters().findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getValue();
  if (outputRecords < inputRecords) {
    System.err.println("Warning, not all records were imported (maybe filtered out).");
    if (outputRecords == 0) {
      System.err.println("If the data was exported from HBase 0.94 "+
          "consider using -Dhbase.import.version=0.94.");
    }
  }

  // Reuse the stored result rather than waiting on the completed job again.
  System.exit(isJobSuccessful ? 0 : 1);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 36, Source file: Import.java

Example 14: TestToHbaseDelete

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
public TestToHbaseDelete() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();
    Connection connection = ConnectionFactory.createConnection(config);
    tablestoreColumnMapping = new ColumnMapping("table-1", connection.getConfiguration());
    family = config.get("hbase.client.tablestore.family");
}
 
Developer ID: aliyun, Project: aliyun-tablestore-hbase-client, Lines of code: 7, Source file: TestToHbaseDelete.java

Example 15: setUp

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; // reset the global block cache singleton so each test starts fresh
  this.conf = HBaseConfiguration.create();
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 6, Source file: TestBlockCacheReporting.java


注:本文中的org.apache.hadoop.hbase.HBaseConfiguration.create方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。