

Java HBaseConfiguration.create Method Code Examples

This article compiles typical usage examples of the org.apache.hadoop.hbase.HBaseConfiguration.create method in Java. If you are wondering what HBaseConfiguration.create does, how to call it, or where to find concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.HBaseConfiguration.


The following presents 15 code examples of the HBaseConfiguration.create method, sorted by popularity by default.
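Before turning to the examples, here is a minimal sketch, not taken from any of the projects below, of what the two create() overloads do: the no-argument form builds a Hadoop Configuration with hbase-default.xml and hbase-site.xml loaded from the classpath, while the one-argument form builds such a configuration and then merges the given Configuration into it. The class name and the ZooKeeper quorum value here are illustrative placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HBaseConfigurationCreateSketch {
  public static void main(String[] args) {
    // No-arg form: layers hbase-default.xml and hbase-site.xml over the Hadoop defaults.
    Configuration conf = HBaseConfiguration.create();

    // Settings can then be overridden programmatically; "localhost" is a placeholder.
    conf.set("hbase.zookeeper.quorum", "localhost");

    // One-arg form: creates a fresh HBase configuration and merges the given
    // Configuration into it (Example 2 below uses it to clone test settings).
    Configuration copy = HBaseConfiguration.create(conf);
    System.out.println(copy.get("hbase.zookeeper.quorum"));
  }
}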

Example 1: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
/**
 * For running a few tests of methods herein.
 * @param args
 * @throws IOException
 */
public static void main(String[] args) throws IOException {
  int count = 1024;
  int size = 10240;
  for (String arg: args) {
    if (arg.startsWith(COUNT)) {
      count = Integer.parseInt(arg.replace(COUNT, ""));
    } else if (arg.startsWith(SIZE)) {
      size = Integer.parseInt(arg.replace(SIZE, ""));
    } else {
      usage(1);
    }
  }
  IPCUtil util = new IPCUtil(HBaseConfiguration.create());
  ((Log4JLogger)IPCUtil.LOG).getLogger().setLevel(Level.ALL);
  timerTests(util, count, size, new KeyValueCodec(), null);
  timerTests(util, count, size, new KeyValueCodec(), new DefaultCodec());
  timerTests(util, count, size, new KeyValueCodec(), new GzipCodec());
}
 
Developer: fengchen8086, Project: ditb, Lines: 24, Source: TestIPCUtil.java

Example 2: setUp

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  this.conf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  // this.cluster = TEST_UTIL.getDFSCluster();
  this.fs = TEST_UTIL.getDFSCluster().getFileSystem();
  this.hbaseRootDir = FSUtils.getRootDir(conf);
  this.dir = new Path(this.hbaseRootDir, TestWALObserver.class.getName());
  this.oldLogDir = new Path(this.hbaseRootDir,
      HConstants.HREGION_OLDLOGDIR_NAME);
  this.logDir = new Path(this.hbaseRootDir,
      DefaultWALProvider.getWALDirectoryName(currentTest.getMethodName()));
  this.logName = HConstants.HREGION_LOGDIR_NAME;

  if (TEST_UTIL.getDFSCluster().getFileSystem().exists(this.hbaseRootDir)) {
    TEST_UTIL.getDFSCluster().getFileSystem().delete(this.hbaseRootDir, true);
  }
  this.wals = new WALFactory(conf, null, currentTest.getMethodName());
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: TestWALObserver.java

Example 3: HTableMultiplexer

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
/**
 * @param conn The HBase connection.
 * @param conf The HBase configuration
 * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
 *          each region server before dropping the request.
 */
public HTableMultiplexer(Connection conn, Configuration conf,
    int perRegionServerBufferQueueSize) {
  this.conn = (ClusterConnection) conn;
  this.pool = HTable.getDefaultExecutor(conf);
  this.retryNum = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize;
  this.maxKeyValueSize = HTable.getMaxKeyValueSize(conf);
  this.flushPeriod = conf.getLong(TABLE_MULTIPLEXER_FLUSH_PERIOD_MS, 100);
  int initThreads = conf.getInt(TABLE_MULTIPLEXER_INIT_THREADS, 10);
  this.executor =
      Executors.newScheduledThreadPool(initThreads,
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build());

  this.workerConf = HBaseConfiguration.create(conf);
  // We do not do the retry because we need to reassign puts to different queues if regions are
  // moved.
  this.workerConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 0);
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: HTableMultiplexer.java
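A short usage note for Example 3: HTableMultiplexer is driven through its non-blocking put() method. Below is a minimal, hedged sketch of that flow; the table name "t1", the row key, and the family/qualifier names are assumptions, and it uses the simpler HTableMultiplexer(Configuration, int) constructor rather than the connection-based one shown above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableMultiplexer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiplexerUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Buffer at most 10000 queued Puts per region server before dropping requests.
    HTableMultiplexer multiplexer = new HTableMultiplexer(conf, 10000);

    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));

    // put() does not block; it returns false when the per-server queue is full.
    boolean queued = multiplexer.put(TableName.valueOf("t1"), put);
    if (!queued) {
      System.err.println("Put was dropped: the buffer queue for its region server is full.");
    }
  }
}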

Example 4: testDefaultProvider

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@Test
public void testDefaultProvider() {
  Configuration conf = HBaseConfiguration.create();
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof DefaultCipherProvider);
  String algorithm =
      conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains(algorithm));
  Cipher a = Encryption.getCipher(conf, algorithm);
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof DefaultCipherProvider);
  assertEquals(a.getName(), algorithm);
  assertEquals(a.getKeyLength(), AES.KEY_LENGTH);
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: TestCipherProvider.java

Example 5: testExistingStripesFromL0

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@Test
public void testExistingStripesFromL0() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(StripeStoreConfig.MIN_FILES_L0_KEY, 3);
  StripeCompactionPolicy.StripeInformationProvider si = createStripes(3, KEY_A);
  verifyCompaction(
      createPolicy(conf), si, si.getLevel0Files(), null, null, si.getStripeBoundaries());
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: TestStripeCompactionPolicy.java

Example 6: testSplitOffStripe

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@Test
public void testSplitOffStripe() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // First test everything with default split count of 2, then split into more.
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2);
  Long[] toSplit = new Long[] { defaultSplitSize - 2, 1L, 1L };
  Long[] noSplit = new Long[] { defaultSplitSize - 2, 1L };
  long splitTargetSize = (long)(defaultSplitSize / defaultSplitCount);
  // Don't split if not eligible for compaction.
  StripeCompactionPolicy.StripeInformationProvider si =
      createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 2L });
  assertNull(createPolicy(conf).selectCompaction(si, al(), false));
  // Make sure everything is eligible.
  conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 500f);
  StripeCompactionPolicy policy = createPolicy(conf);
  verifyWholeStripesCompaction(policy, si, 0, 0, null, 2, splitTargetSize);
  // Add some extra stripes...
  si = createStripesWithSizes(0, 0, noSplit, noSplit, toSplit);
  verifyWholeStripesCompaction(policy, si, 2, 2, null, 2, splitTargetSize);
  // In the middle.
  si = createStripesWithSizes(0, 0, noSplit, toSplit, noSplit);
  verifyWholeStripesCompaction(policy, si, 1, 1, null, 2, splitTargetSize);
  // No split-off with different config (larger split size).
  // However, in this case some eligible stripe will just be compacted alone.
  StripeCompactionPolicy specPolicy = createPolicy(
      conf, defaultSplitSize + 1, defaultSplitCount, defaultInitialCount, false);
  verifySingleStripeCompaction(specPolicy, si, 1, null);
}
 
Developer: fengchen8086, Project: ditb, Lines: 29, Source: TestStripeCompactionPolicy.java

Example 7: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
/**
 * Main entry point.
 *
 * @param args  The command line parameters.
 * @throws Exception When running the job fails.
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length < 2) {
    usage("Wrong number of arguments: " + otherArgs.length);
    System.exit(-1);
  }
  Job job = createSubmittableJob(conf, otherArgs);
  System.exit(job.waitForCompletion(true)? 0 : 1);
}
 
Developer: fengchen8086, Project: ditb, Lines: 17, Source: Export.java

Example 8: testCustomProvider

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@Test
public void testCustomProvider() {
  Configuration conf = HBaseConfiguration.create();
  conf.set(HConstants.CRYPTO_CIPHERPROVIDER_CONF_KEY, MyCipherProvider.class.getName());
  CipherProvider provider = Encryption.getCipherProvider(conf);
  assertTrue(provider instanceof MyCipherProvider);
  assertTrue(Arrays.asList(provider.getSupportedCiphers()).contains("TEST"));
  Cipher a = Encryption.getCipher(conf, "TEST");
  assertNotNull(a);
  assertTrue(a.getProvider() instanceof MyCipherProvider);
  assertEquals(a.getName(), "TEST");
  assertEquals(a.getKeyLength(), 0);
}
 
Developer: fengchen8086, Project: ditb, Lines: 14, Source: TestCipherProvider.java

Example 9: setUpBeforeClass

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY,
      TestCoprocessor.class.getName());
  util = new HBaseTestingUtility(conf);
  util.startMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: TestRegionObserverBypass.java

Example 10: testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@Test
public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, 0.02f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.03f);
  HeapMemoryManager manager = new HeapMemoryManager(new BlockCacheStub(0),
      new MemstoreFlusherStub(0), new RegionServerStub(conf), new RegionServerAccountingStub());
  assertFalse(manager.isTunerOn());
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: TestHeapMemoryManager.java

Example 11: transformFile

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
private static void transformFile(Path input, Path output)
    throws IOException {
  Configuration conf = HBaseConfiguration.create();

  FileSystem inFS = input.getFileSystem(conf);
  FileSystem outFS = output.getFileSystem(conf);

  WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf);
  WALProvider.Writer out = null;

  try {
    if (!(in instanceof ReaderBase)) {
      System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName());
      return;
    }
    boolean compress = ((ReaderBase)in).hasCompression();
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out = WALFactory.createWALWriter(outFS, output, conf);

    WAL.Entry e = null;
    while ((e = in.next()) != null) out.append(e);
  } finally {
    in.close();
    if (out != null) {
      out.close();
      out = null;
    }
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: Compressor.java

Example 12: TestSingleColumnValueFilter

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
public TestSingleColumnValueFilter() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Developer: aliyun, Project: aliyun-tablestore-hbase-client, Lines: 15, Source: TestSingleColumnValueFilter.java

Example 13: main

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
/**
 * Main entry point.
 *
 * @param args  The command line parameters.
 * @throws Exception When running the job fails.
 */
public static void main(String[] args) throws Exception {
  Configuration conf = HBaseConfiguration.create();
  String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
  if (otherArgs.length < 2) {
    usage("Wrong number of arguments: " + otherArgs.length);
    System.exit(-1);
  }
  String inputVersionString = System.getProperty(ResultSerialization.IMPORT_FORMAT_VER);
  if (inputVersionString != null) {
    conf.set(ResultSerialization.IMPORT_FORMAT_VER, inputVersionString);
  }
  Job job = createSubmittableJob(conf, otherArgs);
  boolean isJobSuccessful = job.waitForCompletion(true);
  if(isJobSuccessful){
    // Flush all the regions of the table
    flushRegionsIfNecessary(conf);
  }
  long inputRecords = job.getCounters().findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
  long outputRecords = job.getCounters().findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getValue();
  if (outputRecords < inputRecords) {
    System.err.println("Warning, not all records were imported (maybe filtered out).");
    if (outputRecords == 0) {
      System.err.println("If the data was exported from HBase 0.94 "+
          "consider using -Dhbase.import.version=0.94.");
    }
  }

  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
 
Developer: fengchen8086, Project: ditb, Lines: 36, Source: Import.java

Example 14: TestToHbaseDelete

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
public TestToHbaseDelete() throws IOException,InterruptedException {
    Configuration config = HBaseConfiguration.create();
    Connection connection = ConnectionFactory.createConnection(config);
    tablestoreColumnMapping = new ColumnMapping("table-1", connection.getConfiguration());
    family = config.get("hbase.client.tablestore.family");
}
 
Developer: aliyun, Project: aliyun-tablestore-hbase-client, Lines: 7, Source: TestToHbaseDelete.java

Example 15: setUp

import org.apache.hadoop.hbase.HBaseConfiguration; // import the package/class this method depends on
@Before
public void setUp() throws Exception {
  CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
  this.conf = HBaseConfiguration.create();
}
 
Developer: fengchen8086, Project: ditb, Lines: 6, Source: TestBlockCacheReporting.java


Note: The org.apache.hadoop.hbase.HBaseConfiguration.create method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.